Compare commits

...

63 Commits

Author SHA1 Message Date
di-sukharev
1eaf5e2ad7 build 2024-09-03 13:14:29 +03:00
di-sukharev
47eb59d665 Merge remote-tracking branch 'origin/dev' into 378_fix_hook_env 2024-09-03 13:14:10 +03:00
di-sukharev
69b3c00a52 docs(README): update OCO_AI_PROVIDER and OCO_MODEL instructions for clarity and add valid model name options to OCO_MODEL description 2024-09-02 11:13:02 +03:00
di-sukharev
6f4afbfb52 build 2024-09-02 10:19:33 +03:00
di-sukharev
796de7b07e 3.1.2 2024-09-02 10:19:31 +03:00
unconstructive
9ad281a4ee 🔧 (ollama.ts): update client post request to use getUri method for endpoint URL construction (#404) 2024-09-02 10:18:01 +03:00
di-sukharev
1ce357b023 Merge branch 'dev' of github.com:di-sukharev/opencommit into dev 2024-09-01 18:26:33 +03:00
di-sukharev
45dd07d229 Merge branch 'master' into dev 2024-09-01 18:26:20 +03:00
di-sukharev
fa164377e4 build 2024-09-01 18:23:16 +03:00
di-sukharev
0b89767de0 3.1.1 2024-09-01 18:23:14 +03:00
GPT10
2dded4caa4 v 3.1.0 (#397) 2024-09-01 18:21:56 +03:00
GPT10
670f74ebc7 398: make why configurable (#403)
* feat(config): add OCO_WHY configuration option to enable output of change explanations in commit messages
docs(README): document the new OCO_WHY config option and its usage for outputting reasons for changes
2024-09-01 18:17:25 +03:00
di-sukharev
31fdd8473b fix(prepare-commit-msg-hook): update error handling to provide clearer instructions for setting API keys and improve user guidance 2024-09-01 16:38:05 +03:00
Aloha
89d2aa603b feat: support pnpm (#394)
* feat: support pnpm

* fix(commitlint.ts): format commitlint config output as JSON for better readability

* test(commitlint.test.ts): update expected console output for commit message consistency
2024-09-01 16:11:26 +03:00
GPT10
8702c17758 390 add config set tests (#399)
* fix(commit.ts): improve user confirmation handling by exiting on cancel actions to prevent unintended behavior
refactor(commit.ts): streamline conditional checks for user confirmations to enhance code readability and maintainability

* refactor(commit.ts): rename spinner variables for clarity and consistency in commit message generation process
fix(commit.ts): ensure proper stopping of spinners in case of errors during commit message generation and committing process

* refactor(config.ts): extract default configuration to a constant for better maintainability and readability
refactor(config.ts): improve initGlobalConfig function to accept a configPath parameter for flexibility
feat(config.ts): enhance getConfig function to support separate paths for global and environment configurations
test(config.test.ts): update tests to reflect changes in config handling and ensure proper functionality
style(utils.ts): clean up code formatting for consistency and readability
style(tsconfig.json): adjust formatting in tsconfig.json for better clarity and maintainability

* fix(utils.ts): add existsSync check before removing temp directory to prevent errors if directory does not exist (#401)

---------

Co-authored-by: Takanori Matsumoto <matscube@gmail.com>
2024-09-01 13:28:06 +03:00
di-sukharev
60597d23eb pump v to 3.1.0 2024-08-27 17:31:40 +03:00
di-sukharev
6f04927369 build 2024-08-27 17:31:23 +03:00
di-sukharev
0c0cf9c627 Merge remote-tracking branch 'origin/master' into dev 2024-08-27 17:31:13 +03:00
GPT10
8fe8e614ac refactoring_v1 (#391) 2024-08-27 17:04:36 +03:00
Takanori Matsumoto
68c9ed359c fix(dependencies): update tr46, webidl-conversions, and whatwg-url to latest versions for compatibility and security improvements (#395) 2024-08-26 10:28:34 +03:00
di-sukharev
1b29f3a9fd build 2024-08-19 12:37:38 +03:00
GPT10
596dcd7cea 3.0.20 (#389) 2024-08-19 12:37:17 +03:00
di-sukharev
eb3be62a4f 3.0.20 2024-08-19 12:34:35 +03:00
di-sukharev
58ad1880ef Merge remote-tracking branch 'origin/dev' into marcelwiegand-master 2024-08-19 12:29:55 +03:00
marcelwiegand
5ee17235cb 🐛(anthropic.ts): fix incorrect usage of 'typeof' operator for MODEL variable 2024-08-19 11:13:37 +02:00
marcelwiegand
f9c7316eb3 🐛(openAi.ts): fix incorrect usage of 'typeof' operator for MODEL variable 2024-08-19 11:13:09 +02:00
di-sukharev
dfe2730a45 build 2024-08-19 10:42:36 +03:00
GPT10
a979ba091a 3.0.19 (#384)
* fix(config.ts): move 'gpt-4o-mini' in MODEL_LIST
* docs(config.ts): update API key validation message with detailed instructions
* refactor(config.ts): simplify model validation logic to check for string type instead of MODEL_LIST

* 3.0.19
2024-08-19 10:41:26 +03:00
di-sukharev
6bbe07a9a1 build 2024-08-19 10:28:45 +03:00
di-sukharev
0156bb9dc9 3.0.19 2024-08-19 10:26:46 +03:00
di-sukharev
e27bbd0ac1 fix(config.ts): move 'gpt-4o-mini' in MODEL_LIST
docs(config.ts): update API key validation message with detailed instructions
refactor(config.ts): simplify model validation logic to check for string type instead of MODEL_LIST
2024-08-19 10:25:24 +03:00
GPT10
f3371ac1e3 3.0.18 (#382)
* 📝 (README.md): add support for custom AI models and update documentation to reflect new environment variable OCO_ AI_PROVIDER (#351)

* feat/add gemini (#349)

* fix: prompt-module/@commitlint (#336)

* docs: spelling fix (#325)

---------

Co-authored-by: tumf <y.takahara@gmail.com>
Co-authored-by: Drew Payment <drew.payment@gmail.com>
Co-authored-by: Takanori Matsumoto <matscube@gmail.com>
Co-authored-by: Kellan Stevens <kellan@kellanstevens.com>
Co-authored-by: JMN09 <jmn09@mail.aub.edu>
Co-authored-by: JMN09 <157629053+JMN09@users.noreply.github.com>
2024-08-18 14:55:33 +03:00
di-sukharev
9cc9f9d757 build 2024-08-18 14:47:25 +03:00
di-sukharev
c8d5c53db1 3.0.18 2024-08-18 14:47:23 +03:00
di-sukharev
fc4326233d Merge remote-tracking branch 'origin/master' into dev 2024-08-18 14:23:26 +03:00
XIAOTIAN LIU
07f7a05c4b support read OCO_OLLAMA_API_URL from env to OllamaAiEngine and fix bug in getting ollma model name when slash exists (#375) 2024-08-18 14:22:30 +03:00
di-sukharev
9b2e41d255 fix(package.json): add override for whatwg-url to version 13.0.0 to fix punnycode warning 2024-08-18 14:21:27 +03:00
di-sukharev
6fad862aa5 Merge remote-tracking branch 'origin/master' into dev 2024-08-18 14:04:27 +03:00
XiaomaiTX
c425878f21 feat: allow any string as model (#367)
* refactor(anthropic.ts, openAi.ts): remove model validation against predefined list to allow any string as model, simplifying configuration and improving flexibility
2024-08-18 13:47:18 +03:00
di-sukharev
1fb592afb2 Merge branch 'murex-feature/Flowise' into dev 2024-08-18 13:42:19 +03:00
di-sukharev
684f3dadfc build 2024-08-18 13:41:58 +03:00
di-sukharev
76e6070012 Merge branch 'feature/Flowise' of github.com:murex/opencommit into murex-feature/Flowise 2024-08-18 13:41:38 +03:00
di-sukharev
cb72837866 build 2024-08-18 13:36:02 +03:00
di-sukharev
2432ef9de3 Merge remote-tracking branch 'origin/master' into dev 2024-08-18 13:35:38 +03:00
JMN09
026fd1822a Merge branch 'dev' into feature/Flowise
# Conflicts:
#	out/cli.cjs
#	out/github-action.cjs
#	src/commands/config.ts
#	src/utils/engine.ts
2024-08-14 15:30:12 +03:00
JMN09
b42a52ef24 Merge branch 'master' of https://github.com/murex/opencommit into feature/Flowise 2024-08-13 16:26:25 +03:00
XYCode Kerman
10b031ab36 fix(i18n): zh_CN is wrong (#379) 2024-08-13 10:59:48 +03:00
Isaque Borges
d63b825ae5 fix(config.ts): invalid default config (#371)
* 📝 (README.md): add support for custom AI models and update documentation to reflect new environment variable OCO_ AI_PROVIDER (#351)

* Feat/add gemini (#349)

* 🐛Fix: prompt-module/@commitlint (#336)

* fix(commitlint/utils.ts): correct variable used in search for JSON block end tag

* ♻️ (commitlint/config.ts & pwd-commitlint.ts): Refactor commitlint config loading to support both CJS and ESM modules

💡 (pwd-commitlint.ts): Add detailed comments and error handling for better clarity and robustness in commitlint module loading process

*  (package.json): Add setup script for e2e tests to install dependencies for commitlint configurations
🔧 (setup.sh): Add shell script to set up commitlint configurations for e2e tests

*  (config.ts): Add support for OCO_TEST_MOCK_TYPE configuration key to define test mock type for testing purposes
📝 (config.ts): Update documentation for OCO_TEST_MOCK_TYPE configuration key in configValidators and getConfig functions
📝 (testAi.ts): Add TEST_MOCK_TYPES constant array to define supported test mock types
📝 (testAi.ts): Update generateCommitMessage function to use OCO_TEST_MOCK_TYPE from config for different test mock types
📝 (commitlint.test.ts): Add e2e test for running "oco commitlint force" with different @commitlint versions using CJS and ESM
📝 (utils.ts): Add wait function to introduce delay in milliseconds for testing purposes

*  (commitlint.test.ts): refactor setupCommitlint function to accept a version parameter for better code organization and readability
📝 (commitlint.test.ts): add test case for commitlint@9 using CJS to ensure proper functionality and compatibility
📝 (commitlint.test.ts): add test case for commitlint@18 using CJS to ensure proper functionality and compatibility
📝 (commitlint.test.ts): add test case for commitlint@19 using ESM to ensure proper functionality and compatibility

* 🔧 (commitlint.test.ts): remove unnecessary commands to create and add index.ts file before running tests

* refactor(test/e2e/prompt-module/commitlint.test.ts): remove unused import configure
style(test/e2e/prompt-module/commitlint.test.ts): add missing semicolon for consistency
test(test/e2e/prompt-module/commitlint.test.ts): add e2e tests for @commitlint prompt-module integration

*  (e2e tests): add package.json copying to setupCommitlint for version accuracy
♻️ (commitlint config): refactor commitlint.config.js to use ES module syntax
 (package.json): specify "type": "module" to support ES module syntax

* docs: spelling fix (#325)

* 3.0.17

* ♻️ (config.ts): refactor OCO_AZURE_ENDPOINT default value from empty string to undefined resolving #352 - creates invalid config file

---------

Co-authored-by: tumf <y.takahara@gmail.com>
Co-authored-by: Drew Payment <drew.payment@gmail.com>
Co-authored-by: Takanori Matsumoto <matscube@gmail.com>
Co-authored-by: Kellan Stevens <kellan@kellanstevens.com>
Co-authored-by: di-sukharev <dim.sukharev@gmail.com>
2024-08-13 10:25:26 +03:00
JMN09
b66da48106 build 2024-08-02 17:52:16 +03:00
JMN09
6f24afc600 feature(commands/configs.ts, engine/flowise.ts, utils/engine.ts): Incorporated flowise in configuration variables, added it as an AiEngine, and added it as a return value for getEngine() 2024-08-02 17:50:34 +03:00
di-sukharev
a80dcb03c4 build 2024-07-20 11:05:26 +03:00
GPT10
bebbed856f v 3.0.17 (#366) 2024-07-20 11:03:05 +03:00
di-sukharev
2d5882c257 Merge remote-tracking branch 'origin/master' into dev 2024-07-20 11:00:56 +03:00
Ying-Shan Lin
37fb140563 Update Claude Models (#362) 2024-07-20 11:00:10 +03:00
di-sukharev
aebe7e200f 3.0.17 2024-07-20 10:59:52 +03:00
di-sukharev
6f1e4bcec6 Merge remote-tracking branch 'origin/master' into dev 2024-07-20 10:58:16 +03:00
XiaomaiTX
2059549dce feat: Add all of OpenAI GPT models (#365) 2024-07-20 10:48:00 +03:00
Kellan Stevens
8361dc6838 docs: spelling fix (#325) 2024-07-04 11:08:44 +03:00
Takanori Matsumoto
73ccae9de3 🐛Fix: prompt-module/@commitlint (#336)
* fix(commitlint/utils.ts): correct variable used in search for JSON block end tag

* ♻️ (commitlint/config.ts & pwd-commitlint.ts): Refactor commitlint config loading to support both CJS and ESM modules

💡 (pwd-commitlint.ts): Add detailed comments and error handling for better clarity and robustness in commitlint module loading process

*  (package.json): Add setup script for e2e tests to install dependencies for commitlint configurations
🔧 (setup.sh): Add shell script to set up commitlint configurations for e2e tests

*  (config.ts): Add support for OCO_TEST_MOCK_TYPE configuration key to define test mock type for testing purposes
📝 (config.ts): Update documentation for OCO_TEST_MOCK_TYPE configuration key in configValidators and getConfig functions
📝 (testAi.ts): Add TEST_MOCK_TYPES constant array to define supported test mock types
📝 (testAi.ts): Update generateCommitMessage function to use OCO_TEST_MOCK_TYPE from config for different test mock types
📝 (commitlint.test.ts): Add e2e test for running "oco commitlint force" with different @commitlint versions using CJS and ESM
📝 (utils.ts): Add wait function to introduce delay in milliseconds for testing purposes

*  (commitlint.test.ts): refactor setupCommitlint function to accept a version parameter for better code organization and readability
📝 (commitlint.test.ts): add test case for commitlint@9 using CJS to ensure proper functionality and compatibility
📝 (commitlint.test.ts): add test case for commitlint@18 using CJS to ensure proper functionality and compatibility
📝 (commitlint.test.ts): add test case for commitlint@19 using ESM to ensure proper functionality and compatibility

* 🔧 (commitlint.test.ts): remove unnecessary commands to create and add index.ts file before running tests

* refactor(test/e2e/prompt-module/commitlint.test.ts): remove unused import configure
style(test/e2e/prompt-module/commitlint.test.ts): add missing semicolon for consistency
test(test/e2e/prompt-module/commitlint.test.ts): add e2e tests for @commitlint prompt-module integration

*  (e2e tests): add package.json copying to setupCommitlint for version accuracy
♻️ (commitlint config): refactor commitlint.config.js to use ES module syntax
 (package.json): specify "type": "module" to support ES module syntax
2024-07-04 11:08:08 +03:00
Drew Payment
c58e0c62a4 Feat/add gemini (#349) 2024-07-04 11:03:54 +03:00
tumf
a4b4e65011 📝 (README.md): add support for custom AI models and update documentation to reflect new environment variable OCO_ AI_PROVIDER (#351) 2024-07-04 11:03:17 +03:00
senovr
18f52772b3 Make endpoint url for Ollama configurable (#355) 2024-07-02 20:59:55 +03:00
di-sukharev
fef25a2d06 build 2024-05-25 19:23:06 +03:00
48 changed files with 59625 additions and 43714 deletions

.gitignore

@@ -10,4 +10,5 @@ uncaughtExceptions.log
.vscode
src/*.json
.idea
test.ts
test.ts
notes.md

README.md

@@ -2,7 +2,7 @@
<div>
<img src=".github/logo-grad.svg" alt="OpenCommit logo"/>
<h1 align="center">OpenCommit</h1>
<h4 align="center">Follow the bird <a href="https://twitter.com/_sukharev_"><img src="https://img.shields.io/twitter/follow/_sukharev_?style=flat&label=_sukharev_&logo=twitter&color=0bf&logoColor=fff" align="center"></a>
<h4 align="center">Author <a href="https://twitter.com/_sukharev_"><img src="https://img.shields.io/twitter/follow/_sukharev_?style=flat&label=_sukharev_&logo=twitter&color=0bf&logoColor=fff" align="center"></a>
</div>
<h2>Auto-generate meaningful commits in a second</h2>
<p>Killing lame commits with AI 🤯🔫</p>
@@ -16,7 +16,7 @@
<img src=".github/opencommit-example.png" alt="OpenCommit example"/>
</div>
All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable.
All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable, basically everything is.
## Setup OpenCommit as a CLI tool
@@ -58,7 +58,9 @@ git add <files...>
oco
```
Link to the GitMoji specification: https://gitmoji.dev/
Running `git add` is optional, `oco` will do it for you.
### Running locally with Ollama
You can also run it with a local model through Ollama:
@@ -68,16 +70,36 @@ You can also run it with local model through ollama:
```sh
git add <files...>
OCO_AI_PROVIDER='ollama' opencommit
oco config set OCO_AI_PROVIDER='ollama'
```
If you want to use a model other than mistral (the default), you can do so by setting the `OCO_MODEL` config value as follows:
```sh
oco config set OCO_AI_PROVIDER='ollama'
oco config set OCO_MODEL='llama3:8b'
```
If you have Ollama set up in Docker or on another machine with GPUs (not locally), you can change the default endpoint URL.
You can do so by setting the `OCO_OLLAMA_API_URL` config value as follows:
```sh
oco config set OCO_OLLAMA_API_URL='http://192.168.1.10:11434/api/chat'
```
where 192.168.1.10 is an example of the endpoint URL where you have Ollama set up.
### Flags
There are multiple optional flags that can be used with the `oco` command:
#### Use Full GitMoji Specification
Link to the GitMoji specification: https://gitmoji.dev/
This flag can only be used if the `OCO_EMOJI` configuration item is set to `true`. It allows users to use all emojis in the GitMoji specification. By default, the full GitMoji specification is set to `false`, which includes only 10 emojis (🐛✨📝🚀✅♻️⬆️🔧🌐💡).
This limits the number of tokens sent in each request. However, if you would like to use the full GitMoji specification, you can use the `--fgm` flag.
```
@@ -99,19 +121,24 @@ oco --yes
Create a `.env` file and add OpenCommit config variables there like this:
```env
...
OCO_OPENAI_API_KEY=<your OpenAI API token>
OCO_TOKENS_MAX_INPUT=<max model token limit (default: 4096)>
OCO_TOKENS_MAX_OUTPUT=<max response tokens (default: 500)>
OCO_OPENAI_BASE_PATH=<may be used to set proxy path to OpenAI api>
OCO_DESCRIPTION=<postface a message with ~3 sentences description of the changes>
OCO_EMOJI=<boolean, add GitMoji>
OCO_MODEL=<either 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'>
OCO_MODEL=<either 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview' or any Anthropic or Ollama model or any string basically, but it should be a valid model name>
OCO_LANGUAGE=<locale, scroll to the bottom to see options>
OCO_MESSAGE_TEMPLATE_PLACEHOLDER=<message template placeholder, default: '$msg'>
OCO_PROMPT_MODULE=<either conventional-commit or @commitlint, default: conventional-commit>
OCO_ONE_LINE_COMMIT=<one line commit message, default: false>
OCO_AI_PROVIDER=<openai (default), anthropic, azure, ollama>
...
```
These are not all the config options, but you get the point.
### Global config for all repos
Local config still has higher priority than global config, but you may set `OCO_MODEL` and `OCO_LOCALE` globally and set local configs for `OCO_EMOJI` and `OCO_DESCRIPTION` per repo, which is more convenient.
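As a hedged sketch of that split (key names come from this compare's `config.ts`; the values are illustrative), global settings go through `oco config set` into `~/.opencommit`, while per-repo overrides live in the repository's `.env` file and take priority:
```sh
# global defaults written to ~/.opencommit (used by every repo)
oco config set OCO_MODEL=gpt-4o-mini OCO_LANGUAGE=en

# per-repo overrides in the repository's .env file (win over the global config)
echo "OCO_EMOJI=true" >> .env
echo "OCO_DESCRIPTION=true" >> .env
```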
@@ -119,7 +146,7 @@ Local config still has more priority than Global config, but you may set `OCO_MO
Simply set any of the variables above like this:
```sh
oco config set OCO_MODEL=gpt-4
oco config set OCO_MODEL=gpt-4o-mini
```
Configure [GitMoji](https://gitmoji.dev/) to preface a message.
@@ -134,14 +161,26 @@ To remove preface emojis:
oco config set OCO_EMOJI=false
```
### Switch to GPT-4 or other models
Other config options behave the same way.
By default, OpenCommit uses `gpt-3.5-turbo` model.
### Output WHY the changes were done (WIP)
You may switch to GPT-4 which performs better, but costs ~x15 times more 🤠
You can set the `OCO_WHY` config to `true` to have OpenCommit output a short description of WHY the changes were done after the commit message. Default is `false`.
To make this accurate we must store 'what files do' in some kind of index or embedding and perform a lookup (similar to RAG) to get an accurate git commit message. If you feel like building this, comment on the ticket https://github.com/di-sukharev/opencommit/issues/398 and let's go from there together.
```sh
oco config set OCO_MODEL=gpt-4
oco config set OCO_WHY=true
```
### Switch to GPT-4 or other models
By default, OpenCommit uses the `gpt-4o-mini` model.
You may switch to gpt-4o which performs better, but costs more 🤠
```sh
oco config set OCO_MODEL=gpt-4o
```
or, as a cheaper option:
@@ -150,14 +189,6 @@ or for as a cheaper option:
oco config set OCO_MODEL=gpt-3.5-turbo
```
or for GPT-4 Turbo (Preview) which is more capable, has knowledge of world events up to April 2023, a 128k context window and 2-3x cheaper vs GPT-4:
```sh
oco config set OCO_MODEL=gpt-4-0125-preview
```
Make sure that you spell it `gpt-4` (lowercase) and that you have API access to the 4th model. Even if you have ChatGPT+, that doesn't necessarily mean that you have API access to GPT-4.
### Switch to Azure OpenAI
By default OpenCommit uses [OpenAI](https://openai.com).
@@ -168,7 +199,7 @@ You could switch to [Azure OpenAI Service](https://learn.microsoft.com/azure/cog
opencommit config set OCO_AI_PROVIDER=azure
```
Of course need to set 'OPENAI_API_KEY'. And also need to set the
Of course, you need to set 'OCO_OPENAI_API_KEY'. You also need to set the
'OPENAI_BASE_PATH' for the endpoint and set the deployment name to
'model'.
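A hedged setup sketch using the config keys this compare's `config.ts` validates for the Azure provider (the endpoint and deployment name below are placeholders, not real values):
```sh
oco config set OCO_AI_PROVIDER=azure
oco config set OCO_AZURE_API_KEY=<your_azure_openai_api_key>
oco config set OCO_AZURE_ENDPOINT=<https://your-resource.openai.azure.com/>
oco config set OCO_MODEL=<your_deployment_name>
```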
@@ -191,9 +222,9 @@ oco config set OCO_LANGUAGE=française
The default language setting is **English**
All available languages are currently listed in the [i18n](https://github.com/di-sukharev/opencommit/tree/master/src/i18n) folder
### Push to git
### Push to git (gonna be deprecated)
Pushing to git is on by default but if you would like to turn it off just use:
A prompt to push to git is on by default, but if you would like to turn it off just use:
```sh
oco config set OCO_GITPUSH=false
@@ -213,7 +244,7 @@ Replace `<module>` with either `conventional-commit` or `@commitlint`.
#### Example:
To switch to using th` '@commitlint` prompt module, run:
To switch to using the `@commitlint` prompt module, run:
```sh
oco config set OCO_PROMPT_MODULE=@commitlint
@@ -281,7 +312,7 @@ In our codebase, the implementation of this feature can be found in the followin
```javascript
commitMessage = messageTemplate.replace(
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
commitMessage
);
```
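For context, a hedged usage sketch (assuming the default `$msg` placeholder from the config above; the issue reference is purely illustrative):
```sh
oco '$msg #205'
# the placeholder is replaced with the generated message,
# so the final commit message becomes e.g. "fix(config): ... #205"
```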
@@ -338,7 +369,7 @@ Or follow the process of your IDE Source Control feature, when it calls `git com
OpenCommit is now available as a GitHub Action which automatically improves all new commits messages when you push to remote!
This is great if you want to make sure all of the commits in all of your repository branches are meaningful and not lame like `fix1` or `done2`.
This is great if you want to make sure all commits in all of your repository branches are meaningful and not lame like `fix1` or `done2`.
Create a file `.github/workflows/opencommit.yml` with the contents below:
@@ -381,7 +412,7 @@ jobs:
OCO_OPENAI_BASE_PATH: ''
OCO_DESCRIPTION: false
OCO_EMOJI: false
OCO_MODEL: gpt-3.5-turbo
OCO_MODEL: gpt-4o
OCO_LANGUAGE: en
OCO_PROMPT_MODULE: conventional-commit
```

out/cli.cjs

File diff suppressed because one or more lines are too long

out/github-action.cjs
File diff suppressed because one or more lines are too long

package-lock.json

@@ -1,12 +1,12 @@
{
"name": "opencommit",
"version": "3.0.16",
"version": "3.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "opencommit",
"version": "3.0.16",
"version": "3.1.2",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.10.0",
@@ -16,6 +16,7 @@
"@azure/openai": "^1.0.0-beta.12",
"@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4",
"@octokit/webhooks-schemas": "^6.11.0",
"@octokit/webhooks-types": "^6.11.0",
"ai": "^2.2.14",
@@ -27,7 +28,7 @@
"ignore": "^5.2.4",
"ini": "^3.0.1",
"inquirer": "^9.1.4",
"openai": "^3.2.1"
"openai": "^4.56.0"
},
"bin": {
"oco": "out/cli.cjs",
@@ -1051,6 +1052,14 @@
"node": ">=14"
}
},
"node_modules/@google/generative-ai": {
"version": "0.11.4",
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.11.4.tgz",
"integrity": "sha512-hlw+E9Prv9aUIQISRnLSXi4rukFqKe5WhxPvzBccTvIvXjw2BHMFOJWSC/Gq7WE0W+L/qRHGmYxopmx9qjrB9w==",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/@humanwhocodes/config-array": {
"version": "0.11.14",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",
@@ -2648,9 +2657,9 @@
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
},
"node_modules/axios": {
"version": "1.6.8",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.8.tgz",
"integrity": "sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==",
"version": "1.7.4",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.4.tgz",
"integrity": "sha512-DukmaFRnY6AzAALSH4J2M3k6PkaC+MfaAGdEERRWcC9q3/TWQwLpHR8ZRLKTdQ3aBDL64EdluRDjJqKw+BPZEw==",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",
@@ -2882,12 +2891,12 @@
}
},
"node_modules/braces": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"dependencies": {
"fill-range": "^7.0.1"
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
@@ -4537,9 +4546,9 @@
}
},
"node_modules/fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"dependencies": {
"to-regex-range": "^5.0.1"
@@ -7145,6 +7154,25 @@
}
}
},
"node_modules/node-fetch/node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/node-fetch/node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
},
"node_modules/node-fetch/node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/node-int64": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
@@ -7214,20 +7242,36 @@
}
},
"node_modules/openai": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-3.3.0.tgz",
"integrity": "sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==",
"version": "4.56.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-4.56.0.tgz",
"integrity": "sha512-zcag97+3bG890MNNa0DQD9dGmmTWL8unJdNkulZzWRXrl+QeD+YkBI4H58rJcwErxqGK6a0jVPZ4ReJjhDGcmw==",
"dependencies": {
"axios": "^0.26.0",
"form-data": "^4.0.0"
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
},
"bin": {
"openai": "bin/cli"
},
"peerDependencies": {
"zod": "^3.23.8"
},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
}
},
"node_modules/openai/node_modules/axios": {
"version": "0.26.1",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.26.1.tgz",
"integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==",
"node_modules/openai/node_modules/@types/node": {
"version": "18.19.45",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.45.tgz",
"integrity": "sha512-VZxPKNNhjKmaC1SUYowuXSRSMGyQGmQjvvA1xE4QZ0xce2kLtEhPDS+kqpCPBZYgqblCLQ2DAjSzmgCM5auvhA==",
"dependencies": {
"follow-redirects": "^1.14.8"
"undici-types": "~5.26.4"
}
},
"node_modules/optionator": {
@@ -8400,11 +8444,6 @@
"node": ">=8.0"
}
},
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/tree-kill": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz",
@@ -8735,20 +8774,6 @@
"node": ">= 8"
}
},
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",

package.json

@@ -1,6 +1,6 @@
{
"name": "opencommit",
"version": "3.0.16",
"version": "3.1.2",
"description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
"keywords": [
"git",
@@ -43,16 +43,20 @@
"start": "node ./out/cli.cjs",
"ollama:start": "OCO_AI_PROVIDER='ollama' node ./out/cli.cjs",
"dev": "ts-node ./src/cli.ts",
"dev:gemini": "OCO_AI_PROVIDER='gemini' ts-node ./src/cli.ts",
"build": "rimraf out && node esbuild.config.js",
"build:push": "npm run build && git add . && git commit -m 'build' && git push",
"deploy": "npm version patch && npm run build:push && git push --tags && npm publish --tag latest",
"deploy": "npm run build:push && git push --tags && npm publish --tag latest",
"deploy:patch": "npm version patch && npm run deploy",
"lint": "eslint src --ext ts && tsc --noEmit",
"format": "prettier --write src",
"test": "node --no-warnings --experimental-vm-modules $( [ -f ./node_modules/.bin/jest ] && echo ./node_modules/.bin/jest || which jest ) test/unit",
"test:all": "npm run test:unit:docker && npm run test:e2e:docker",
"test:docker-build": "docker build -t oco-test -f test/Dockerfile .",
"test:unit": "NODE_OPTIONS=--experimental-vm-modules jest test/unit",
"test:unit:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:unit",
"test:e2e": "jest test/e2e",
"test:e2e": "npm run test:e2e:setup && jest test/e2e",
"test:e2e:setup": "sh test/e2e/setup.sh",
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e"
},
"devDependencies": {
@@ -77,10 +81,11 @@
"@actions/core": "^1.10.0",
"@actions/exec": "^1.1.1",
"@actions/github": "^5.1.1",
"@azure/openai": "^1.0.0-beta.12",
"@anthropic-ai/sdk": "^0.19.2",
"@azure/openai": "^1.0.0-beta.12",
"@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4",
"@octokit/webhooks-schemas": "^6.11.0",
"@octokit/webhooks-types": "^6.11.0",
"ai": "^2.2.14",
@@ -92,6 +97,6 @@
"ignore": "^5.2.4",
"ini": "^3.0.1",
"inquirer": "^9.1.4",
"openai": "^3.2.1"
"openai": "^4.56.0"
}
}

src/commands/ENUMS.ts (new file)

@@ -0,0 +1,5 @@
export enum COMMANDS {
config = 'config',
hook = 'hook',
commitlint = 'commitlint'
}

src/commands/commit.ts

@@ -1,6 +1,3 @@
import chalk from 'chalk';
import { execa } from 'execa';
import {
confirm,
intro,
@@ -10,7 +7,8 @@ import {
select,
spinner
} from '@clack/prompts';
import chalk from 'chalk';
import { execa } from 'execa';
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
import {
assertGitRepo,
@@ -32,21 +30,28 @@ const getGitRemotes = async () => {
// Check for the presence of message templates
const checkMessageTemplate = (extraArgs: string[]): string | false => {
for (const key in extraArgs) {
if (extraArgs[key].includes(config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER))
if (extraArgs[key].includes(config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER))
return extraArgs[key];
}
return false;
};
const generateCommitMessageFromGitDiff = async (
diff: string,
extraArgs: string[],
fullGitMojiSpec: boolean,
skipCommitConfirmation: boolean
): Promise<void> => {
interface GenerateCommitMessageFromGitDiffParams {
diff: string;
extraArgs: string[];
fullGitMojiSpec?: boolean;
skipCommitConfirmation?: boolean;
}
const generateCommitMessageFromGitDiff = async ({
diff,
extraArgs,
fullGitMojiSpec = false,
skipCommitConfirmation = false
}: GenerateCommitMessageFromGitDiffParams): Promise<void> => {
await assertGitRepo();
const commitSpinner = spinner();
commitSpinner.start('Generating the commit message');
const commitGenerationSpinner = spinner();
commitGenerationSpinner.start('Generating the commit message');
try {
let commitMessage = await generateCommitMessageByDiff(
@@ -56,19 +61,19 @@ const generateCommitMessageFromGitDiff = async (
const messageTemplate = checkMessageTemplate(extraArgs);
if (
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER &&
config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER &&
typeof messageTemplate === 'string'
) {
const messageTemplateIndex = extraArgs.indexOf(messageTemplate);
extraArgs.splice(messageTemplateIndex, 1);
commitMessage = messageTemplate.replace(
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
commitMessage
);
}
commitSpinner.stop('📝 Commit message generated');
commitGenerationSpinner.stop('📝 Commit message generated');
outro(
`Generated commit message:
@@ -77,40 +82,45 @@ ${commitMessage}
${chalk.grey('——————————————————')}`
);
const isCommitConfirmedByUser = skipCommitConfirmation || await confirm({
message: 'Confirm the commit message?'
});
const isCommitConfirmedByUser =
skipCommitConfirmation ||
(await confirm({
message: 'Confirm the commit message?'
}));
if (isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) {
if (isCancel(isCommitConfirmedByUser)) process.exit(1);
if (isCommitConfirmedByUser) {
const committingChangesSpinner = spinner();
committingChangesSpinner.start('Committing the changes');
const { stdout } = await execa('git', [
'commit',
'-m',
commitMessage,
...extraArgs
]);
outro(`${chalk.green('✔')} Successfully committed`);
committingChangesSpinner.stop(
`${chalk.green('✔')} Successfully committed`
);
outro(stdout);
const remotes = await getGitRemotes();
// user isn't pushing, return early
if (config?.OCO_GITPUSH === false)
return
if (!remotes.length) {
const { stdout } = await execa('git', ['push']);
if (stdout) outro(stdout);
process.exit(0);
}
if (remotes.length === 1 && config?.OCO_GITPUSH !== true) {
if (remotes.length === 1 && config.OCO_GITPUSH !== true) {
const isPushConfirmedByUser = await confirm({
message: 'Do you want to run `git push`?'
});
if (isPushConfirmedByUser && !isCancel(isPushConfirmedByUser)) {
if (isCancel(isPushConfirmedByUser)) process.exit(1);
if (isPushConfirmedByUser) {
const pushSpinner = spinner();
pushSpinner.start(`Running 'git push ${remotes[0]}'`);
@@ -138,37 +148,39 @@ ${chalk.grey('——————————————————')}`
options: remotes.map((remote) => ({ value: remote, label: remote }))
})) as string;
if (!isCancel(selectedRemote)) {
const pushSpinner = spinner();
if (isCancel(selectedRemote)) process.exit(1);
pushSpinner.start(`Running 'git push ${selectedRemote}'`);
const pushSpinner = spinner();
const { stdout } = await execa('git', ['push', selectedRemote]);
pushSpinner.start(`Running 'git push ${selectedRemote}'`);
pushSpinner.stop(
`${chalk.green(
'✔'
)} Successfully pushed all commits to ${selectedRemote}`
);
const { stdout } = await execa('git', ['push', selectedRemote]);
if (stdout) outro(stdout);
} else outro(`${chalk.gray('✖')} process cancelled`);
pushSpinner.stop(
`${chalk.green(
'✔'
)} Successfully pushed all commits to ${selectedRemote}`
);
if (stdout) outro(stdout);
}
}
if (!isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) {
} else {
const regenerateMessage = await confirm({
message: 'Do you want to regenerate the message ?'
message: 'Do you want to regenerate the message?'
});
if (regenerateMessage && !isCancel(isCommitConfirmedByUser)) {
await generateCommitMessageFromGitDiff(
if (isCancel(regenerateMessage)) process.exit(1);
if (regenerateMessage) {
await generateCommitMessageFromGitDiff({
diff,
extraArgs,
fullGitMojiSpec
)
});
}
}
} catch (error) {
commitSpinner.stop('📝 Commit message generated');
commitGenerationSpinner.stop('📝 Commit message generated');
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
@@ -216,10 +228,9 @@ export async function commit(
message: 'Do you want to stage all files and generate commit message?'
});
if (
isStageAllAndCommitConfirmedByUser &&
!isCancel(isStageAllAndCommitConfirmedByUser)
) {
if (isCancel(isStageAllAndCommitConfirmedByUser)) process.exit(1);
if (isStageAllAndCommitConfirmedByUser) {
await commit(extraArgs, true, fullGitMojiSpec);
process.exit(1);
}
@@ -249,12 +260,12 @@ export async function commit(
);
const [, generateCommitError] = await trytm(
generateCommitMessageFromGitDiff(
await getDiff({ files: stagedFiles }),
generateCommitMessageFromGitDiff({
diff: await getDiff({ files: stagedFiles }),
extraArgs,
fullGitMojiSpec,
skipCommitConfirmation
)
})
);
if (generateCommitError) {

src/commands/commitlint.ts

@@ -1,11 +1,9 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum';
import { configureCommitlintIntegration } from '../modules/commitlint/config';
import { getCommitlintLLMConfig } from '../modules/commitlint/utils';
import { COMMANDS } from './ENUMS';
export enum CONFIG_MODES {
get = 'get',
@@ -25,7 +23,7 @@ export const commitlintConfigCommand = command(
if (mode === CONFIG_MODES.get) {
const commitLintConfig = await getCommitlintLLMConfig();
outro(commitLintConfig.toString());
outro(JSON.stringify(commitLintConfig, null, 2));
return;
}

src/commands/config.ts

@@ -1,3 +1,4 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import * as dotenv from 'dotenv';
@@ -5,16 +6,16 @@ import { existsSync, readFileSync, writeFileSync } from 'fs';
import { parse as iniParse, stringify as iniStringify } from 'ini';
import { homedir } from 'os';
import { join as pathJoin, resolve as pathResolve } from 'path';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum';
import { getI18nLocal } from '../i18n';
import { COMMANDS } from './ENUMS';
import { TEST_MOCK_TYPES } from '../engine/testAi';
import { getI18nLocal, i18n } from '../i18n';
export enum CONFIG_KEYS {
OCO_OPENAI_API_KEY = 'OCO_OPENAI_API_KEY',
OCO_ANTHROPIC_API_KEY = 'OCO_ANTHROPIC_API_KEY',
OCO_AZURE_API_KEY = 'OCO_AZURE_API_KEY',
OCO_GEMINI_API_KEY = 'OCO_GEMINI_API_KEY',
OCO_GEMINI_BASE_PATH = 'OCO_GEMINI_BASE_PATH',
OCO_TOKENS_MAX_INPUT = 'OCO_TOKENS_MAX_INPUT',
OCO_TOKENS_MAX_OUTPUT = 'OCO_TOKENS_MAX_OUTPUT',
OCO_OPENAI_BASE_PATH = 'OCO_OPENAI_BASE_PATH',
@@ -22,12 +23,18 @@ export enum CONFIG_KEYS {
OCO_EMOJI = 'OCO_EMOJI',
OCO_MODEL = 'OCO_MODEL',
OCO_LANGUAGE = 'OCO_LANGUAGE',
OCO_WHY = 'OCO_WHY',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER = 'OCO_MESSAGE_TEMPLATE_PLACEHOLDER',
OCO_PROMPT_MODULE = 'OCO_PROMPT_MODULE',
OCO_AI_PROVIDER = 'OCO_AI_PROVIDER',
OCO_GITPUSH = 'OCO_GITPUSH',
OCO_GITPUSH = 'OCO_GITPUSH', // todo: deprecate
OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT',
OCO_AZURE_ENDPOINT = 'OCO_AZURE_ENDPOINT'
OCO_AZURE_ENDPOINT = 'OCO_AZURE_ENDPOINT',
OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE',
OCO_API_URL = 'OCO_API_URL',
OCO_OLLAMA_API_URL = 'OCO_OLLAMA_API_URL',
OCO_FLOWISE_ENDPOINT = 'OCO_FLOWISE_ENDPOINT',
OCO_FLOWISE_API_KEY = 'OCO_FLOWISE_API_KEY'
}
export enum CONFIG_MODES {
@@ -36,19 +43,50 @@ export enum CONFIG_MODES {
}
export const MODEL_LIST = {
openai: ['gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-4',
'gpt-4-turbo',
'gpt-4-1106-preview',
'gpt-4-turbo-preview',
'gpt-4-0125-preview',
'gpt-4o'],
openai: [
'gpt-4o-mini',
'gpt-3.5-turbo',
'gpt-3.5-turbo-instruct',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-0301',
'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-16k',
'gpt-3.5-turbo-16k-0613',
'gpt-3.5-turbo-16k-0301',
'gpt-4',
'gpt-4-0314',
'gpt-4-0613',
'gpt-4-1106-preview',
'gpt-4-0125-preview',
'gpt-4-turbo-preview',
'gpt-4-vision-preview',
'gpt-4-1106-vision-preview',
'gpt-4-turbo',
'gpt-4-turbo-2024-04-09',
'gpt-4-32k',
'gpt-4-32k-0314',
'gpt-4-32k-0613',
'gpt-4o',
'gpt-4o-2024-05-13',
'gpt-4o-mini-2024-07-18'
],
anthropic: ['claude-3-haiku-20240307',
'claude-3-sonnet-20240229',
'claude-3-opus-20240229']
}
anthropic: [
'claude-3-5-sonnet-20240620',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307'
],
gemini: [
'gemini-1.5-flash',
'gemini-1.5-pro',
'gemini-1.0-pro',
'gemini-pro-vision',
'text-embedding-004'
]
};
const getDefaultModel = (provider: string | undefined): string => {
switch (provider) {
@@ -56,14 +94,16 @@ const getDefaultModel = (provider: string | undefined): string => {
return '';
case 'anthropic':
return MODEL_LIST.anthropic[0];
case 'gemini':
return MODEL_LIST.gemini[0];
default:
return MODEL_LIST.openai[0];
}
};
export enum DEFAULT_TOKEN_LIMITS {
DEFAULT_MAX_TOKENS_INPUT = 4096,
DEFAULT_MAX_TOKENS_OUTPUT = 500
DEFAULT_MAX_TOKENS_INPUT = 40960,
DEFAULT_MAX_TOKENS_OUTPUT = 4096
}
const validateConfig = (
@@ -72,8 +112,10 @@ const validateConfig = (
validationMessage: string
) => {
if (!condition) {
outro(`${chalk.red('✖')} wrong value for ${key}: ${validationMessage}.`);
outro(
`${chalk.red('✖')} Unsupported config key ${key}: ${validationMessage}` +
'For more help refer to docs https://github.com/di-sukharev/opencommit'
);
process.exit(1);
@@ -82,36 +124,64 @@ const validateConfig = (
export const configValidators = {
[CONFIG_KEYS.OCO_OPENAI_API_KEY](value: any, config: any = {}) {
//need api key unless running locally with ollama
if (config.OCO_AI_PROVIDER !== 'openai') return value;
validateConfig(
'OpenAI API_KEY',
value || config.OCO_ANTHROPIC_API_KEY || config.OCO_AI_PROVIDER.startsWith('ollama') || config.OCO_AZURE_API_KEY || config.OCO_AI_PROVIDER == 'test' ,
'You need to provide an OpenAI/Anthropic/Azure API key'
'OCO_OPENAI_API_KEY',
typeof value === 'string' && value.length > 0,
'Empty value is not allowed'
);
validateConfig(
CONFIG_KEYS.OCO_OPENAI_API_KEY,
value.startsWith('sk-') || config.OCO_AI_PROVIDER != 'openai',
'Must start with "sk-" for openai provider'
'OCO_OPENAI_API_KEY',
value,
'You need to provide the OCO_OPENAI_API_KEY when OCO_AI_PROVIDER is set to "openai" (default). Run `oco config set OCO_OPENAI_API_KEY=your_key`'
);
return value;
},
[CONFIG_KEYS.OCO_AZURE_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER !== 'azure') return value;
validateConfig(
'ANTHROPIC_API_KEY',
value || config.OCO_OPENAI_API_KEY || config.OCO_AZURE_API_KEY || config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'test',
'You need to provide an OpenAI/Anthropic/Azure API key'
'OCO_AZURE_API_KEY',
!!value,
'You need to provide the OCO_AZURE_API_KEY when OCO_AI_PROVIDER is set to "azure". Run: `oco config set OCO_AZURE_API_KEY=your_key`'
);
return value;
},
[CONFIG_KEYS.OCO_GEMINI_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER !== 'gemini') return value;
validateConfig(
'OCO_GEMINI_API_KEY',
value || config.OCO_GEMINI_API_KEY || config.OCO_AI_PROVIDER === 'test',
'You need to provide the OCO_GEMINI_API_KEY when OCO_AI_PROVIDER is set to "gemini". Run: `oco config set OCO_GEMINI_API_KEY=your_key`'
);
return value;
},
[CONFIG_KEYS.OCO_ANTHROPIC_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER !== 'anthropic') return value;
validateConfig(
'ANTHROPIC_API_KEY',
value || config.OCO_OPENAI_API_KEY || config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'test',
'You need to provide an OpenAI/Anthropic/Azure API key'
!!value,
'You need to provide the OCO_ANTHROPIC_API_KEY key when OCO_AI_PROVIDER is set to "anthropic". Run: `oco config set OCO_ANTHROPIC_API_KEY=your_key`'
);
return value;
},
[CONFIG_KEYS.OCO_FLOWISE_API_KEY](value: any, config: any = {}) {
validateConfig(
CONFIG_KEYS.OCO_FLOWISE_API_KEY,
value || config.OCO_AI_PROVIDER !== 'flowise',
'You need to provide the OCO_FLOWISE_API_KEY when OCO_AI_PROVIDER is set to "flowise". Run: `oco config set OCO_FLOWISE_API_KEY=your_key`'
);
return value;
@@ -121,25 +191,17 @@ export const configValidators = {
validateConfig(
CONFIG_KEYS.OCO_DESCRIPTION,
typeof value === 'boolean',
'Must be true or false'
'Must be boolean: true or false'
);
return value;
},
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT](value: any) {
// If the value is a string, convert it to a number.
if (typeof value === 'string') {
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_INPUT,
!isNaN(value),
'Must be a number'
);
}
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_INPUT,
value ? typeof value === 'number' : undefined,
!isNaN(value),
'Must be a number'
);
@@ -147,18 +209,10 @@ export const configValidators = {
},
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT](value: any) {
// If the value is a string, convert it to a number.
if (typeof value === 'string') {
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT,
!isNaN(value),
'Must be a number'
);
}
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT,
value ? typeof value === 'number' : undefined,
!isNaN(value),
'Must be a number'
);
@@ -169,18 +223,21 @@ export const configValidators = {
validateConfig(
CONFIG_KEYS.OCO_EMOJI,
typeof value === 'boolean',
'Must be true or false'
'Must be boolean: true or false'
);
return value;
},
[CONFIG_KEYS.OCO_LANGUAGE](value: any) {
const supportedLanguages = Object.keys(i18n);
validateConfig(
CONFIG_KEYS.OCO_LANGUAGE,
getI18nLocal(value),
`${value} is not supported yet`
`${value} is not supported yet. Supported languages: ${supportedLanguages}`
);
return getI18nLocal(value);
},
@@ -196,15 +253,12 @@ export const configValidators = {
[CONFIG_KEYS.OCO_MODEL](value: any, config: any = {}) {
validateConfig(
CONFIG_KEYS.OCO_MODEL,
[...MODEL_LIST.openai, ...MODEL_LIST.anthropic].includes(value) || config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'test'|| config.OCO_AI_PROVIDER == 'azure',
`${value} is not supported yet, use 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview', 'gpt-4-0125-preview', 'claude-3-opus-20240229', 'claude-3-sonnet-20240229' or 'claude-3-haiku-20240307'`
);
validateConfig(
CONFIG_KEYS.OCO_MODEL,
typeof value === 'string' &&
value.match(/^[a-zA-Z0-9~\-]{1,63}[a-zA-Z0-9]$/) ||
config.OCO_AI_PROVIDER != 'azure',
`${value} is not model deployed name.`
typeof value === 'string',
`${value} is not supported yet, use:\n\n ${[
...MODEL_LIST.openai,
...MODEL_LIST.anthropic,
...MODEL_LIST.gemini
].join('\n')}`
);
return value;
},
@@ -227,6 +281,7 @@ export const configValidators = {
return value;
},
// todo: deprecate
[CONFIG_KEYS.OCO_GITPUSH](value: any) {
validateConfig(
CONFIG_KEYS.OCO_GITPUSH,
@@ -237,18 +292,16 @@ export const configValidators = {
},
[CONFIG_KEYS.OCO_AI_PROVIDER](value: any) {
if (!value) value = 'openai';
validateConfig(
CONFIG_KEYS.OCO_AI_PROVIDER,
[
'',
'openai',
'anthropic',
'azure',
'ollama',
'test'
].includes(value) || value.startsWith('ollama'),
`${value} is not supported yet, use 'ollama/{model}', 'azure', 'anthropic' or 'openai' (default)`
['openai', 'anthropic', 'gemini', 'azure', 'test', 'flowise'].includes(
value
) || value.startsWith('ollama'),
`${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
);
return value;
},
@@ -261,6 +314,7 @@ export const configValidators = {
return value;
},
[CONFIG_KEYS.OCO_AZURE_ENDPOINT](value: any) {
validateConfig(
CONFIG_KEYS.OCO_AZURE_ENDPOINT,
@@ -270,105 +324,251 @@ export const configValidators = {
return value;
},
[CONFIG_KEYS.OCO_FLOWISE_ENDPOINT](value: any) {
validateConfig(
CONFIG_KEYS.OCO_FLOWISE_ENDPOINT,
typeof value === 'string' && value.includes(':'),
'Value must be string and should include both I.P. and port number' // Considering the possibility of DNS lookup or feeding the I.P. explicitly, there is no pattern to verify, except a colon for the port number
);
return value;
},
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE](value: any) {
validateConfig(
CONFIG_KEYS.OCO_TEST_MOCK_TYPE,
TEST_MOCK_TYPES.includes(value),
`${value} is not supported yet, use ${TEST_MOCK_TYPES.map(
(t) => `'${t}'`
).join(', ')}`
);
return value;
},
[CONFIG_KEYS.OCO_OLLAMA_API_URL](value: any) {
validateConfig(
CONFIG_KEYS.OCO_OLLAMA_API_URL,
typeof value === 'string' && value.startsWith('http'),
`${value} is not a valid URL. It should start with 'http://' or 'https://'.`
);
return value;
}
};
export enum OCO_AI_PROVIDER_ENUM {
OLLAMA = 'ollama',
OPENAI = 'openai',
ANTHROPIC = 'anthropic',
GEMINI = 'gemini',
AZURE = 'azure',
TEST = 'test',
FLOWISE = 'flowise'
}
export type ConfigType = {
[key in CONFIG_KEYS]?: any;
[CONFIG_KEYS.OCO_OPENAI_API_KEY]?: string;
[CONFIG_KEYS.OCO_ANTHROPIC_API_KEY]?: string;
[CONFIG_KEYS.OCO_AZURE_API_KEY]?: string;
[CONFIG_KEYS.OCO_GEMINI_API_KEY]?: string;
[CONFIG_KEYS.OCO_GEMINI_BASE_PATH]?: string;
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT]: number;
[CONFIG_KEYS.OCO_OPENAI_BASE_PATH]?: string;
[CONFIG_KEYS.OCO_DESCRIPTION]: boolean;
[CONFIG_KEYS.OCO_EMOJI]: boolean;
[CONFIG_KEYS.OCO_WHY]: boolean;
[CONFIG_KEYS.OCO_MODEL]: string;
[CONFIG_KEYS.OCO_LANGUAGE]: string;
[CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER]: string;
[CONFIG_KEYS.OCO_PROMPT_MODULE]: OCO_PROMPT_MODULE_ENUM;
[CONFIG_KEYS.OCO_AI_PROVIDER]: OCO_AI_PROVIDER_ENUM;
[CONFIG_KEYS.OCO_GITPUSH]: boolean;
[CONFIG_KEYS.OCO_ONE_LINE_COMMIT]: boolean;
[CONFIG_KEYS.OCO_AZURE_ENDPOINT]?: string;
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE]: string;
[CONFIG_KEYS.OCO_API_URL]?: string;
[CONFIG_KEYS.OCO_OLLAMA_API_URL]?: string;
[CONFIG_KEYS.OCO_FLOWISE_ENDPOINT]: string;
[CONFIG_KEYS.OCO_FLOWISE_API_KEY]?: string;
};
const defaultConfigPath = pathJoin(homedir(), '.opencommit');
const defaultEnvPath = pathResolve(process.cwd(), '.env');
export const getConfig = ({
configPath = defaultConfigPath,
envPath = defaultEnvPath
}: {
configPath?: string
envPath?: string
} = {}): ConfigType | null => {
dotenv.config({ path: envPath });
const configFromEnv = {
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
OCO_ANTHROPIC_API_KEY: process.env.OCO_ANTHROPIC_API_KEY,
OCO_AZURE_API_KEY: process.env.OCO_AZURE_API_KEY,
OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT
? Number(process.env.OCO_TOKENS_MAX_INPUT)
: undefined,
OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT
? Number(process.env.OCO_TOKENS_MAX_OUTPUT)
: undefined,
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === 'true' ? true : false,
OCO_EMOJI: process.env.OCO_EMOJI === 'true' ? true : false,
OCO_MODEL: process.env.OCO_MODEL || getDefaultModel(process.env.OCO_AI_PROVIDER),
OCO_LANGUAGE: process.env.OCO_LANGUAGE || 'en',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER || '$msg',
OCO_PROMPT_MODULE: process.env.OCO_PROMPT_MODULE || 'conventional-commit',
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER || 'openai',
OCO_GITPUSH: process.env.OCO_GITPUSH === 'false' ? false : true,
OCO_ONE_LINE_COMMIT:
process.env.OCO_ONE_LINE_COMMIT === 'true' ? true : false,
OCO_AZURE_ENDPOINT: process.env.OCO_AZURE_ENDPOINT || '',
};
const assertConfigsAreValid = (config: Record<string, any>) => {
for (const [key, value] of Object.entries(config)) {
if (!value) continue;
const configExists = existsSync(configPath);
if (!configExists) return configFromEnv;
const configFile = readFileSync(configPath, 'utf8');
const config = iniParse(configFile);
for (const configKey of Object.keys(config)) {
if (
['null', 'undefined'].includes(config[configKey])
) {
config[configKey] = undefined;
if (typeof value === 'string' && ['null', 'undefined'].includes(value)) {
config[key] = undefined;
continue;
}
try {
const validator = configValidators[configKey as CONFIG_KEYS];
const validValue = validator(
config[configKey] ?? configFromEnv[configKey as CONFIG_KEYS],
config
);
config[configKey] = validValue;
try {
const validate = configValidators[key as CONFIG_KEYS];
validate(value, config);
} catch (error) {
outro(`Unknown '${configKey}' config option or missing validator.`);
outro(`Unknown '${key}' config option or missing validator.`);
outro(
`Manually fix the '.env' file or global '~/.opencommit' config file.`
);
process.exit(1);
}
}
};
enum OCO_PROMPT_MODULE_ENUM {
CONVENTIONAL_COMMIT = 'conventional-commit',
COMMITLINT = '@commitlint'
}
export const DEFAULT_CONFIG = {
OCO_TOKENS_MAX_INPUT: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT,
OCO_TOKENS_MAX_OUTPUT: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT,
OCO_DESCRIPTION: false,
OCO_EMOJI: false,
OCO_MODEL: getDefaultModel('openai'),
OCO_LANGUAGE: 'en',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: '$msg',
OCO_PROMPT_MODULE: OCO_PROMPT_MODULE_ENUM.CONVENTIONAL_COMMIT,
OCO_AI_PROVIDER: OCO_AI_PROVIDER_ENUM.OPENAI,
OCO_ONE_LINE_COMMIT: false,
OCO_TEST_MOCK_TYPE: 'commit-message',
OCO_FLOWISE_ENDPOINT: ':',
OCO_WHY: false,
OCO_GITPUSH: true // todo: deprecate
};
const initGlobalConfig = (configPath: string = defaultConfigPath) => {
writeFileSync(configPath, iniStringify(DEFAULT_CONFIG), 'utf8');
return DEFAULT_CONFIG;
};
const parseEnvVarValue = (value?: any) => {
try {
return JSON.parse(value);
} catch (error) {
return value;
}
};
const getEnvConfig = (envPath: string) => {
dotenv.config({ path: envPath });
return {
OCO_MODEL: process.env.OCO_MODEL,
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
OCO_ANTHROPIC_API_KEY: process.env.OCO_ANTHROPIC_API_KEY,
OCO_AZURE_API_KEY: process.env.OCO_AZURE_API_KEY,
OCO_GEMINI_API_KEY: process.env.OCO_GEMINI_API_KEY,
OCO_FLOWISE_API_KEY: process.env.OCO_FLOWISE_API_KEY,
OCO_TOKENS_MAX_INPUT: parseEnvVarValue(process.env.OCO_TOKENS_MAX_INPUT),
OCO_TOKENS_MAX_OUTPUT: parseEnvVarValue(process.env.OCO_TOKENS_MAX_OUTPUT),
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
OCO_GEMINI_BASE_PATH: process.env.OCO_GEMINI_BASE_PATH,
OCO_AZURE_ENDPOINT: process.env.OCO_AZURE_ENDPOINT,
OCO_FLOWISE_ENDPOINT: process.env.OCO_FLOWISE_ENDPOINT,
OCO_OLLAMA_API_URL: process.env.OCO_OLLAMA_API_URL,
OCO_DESCRIPTION: parseEnvVarValue(process.env.OCO_DESCRIPTION),
OCO_EMOJI: parseEnvVarValue(process.env.OCO_EMOJI),
OCO_LANGUAGE: process.env.OCO_LANGUAGE,
OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
OCO_PROMPT_MODULE: process.env.OCO_PROMPT_MODULE as OCO_PROMPT_MODULE_ENUM,
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM,
OCO_ONE_LINE_COMMIT: parseEnvVarValue(process.env.OCO_ONE_LINE_COMMIT),
OCO_TEST_MOCK_TYPE: process.env.OCO_TEST_MOCK_TYPE,
OCO_GITPUSH: parseEnvVarValue(process.env.OCO_GITPUSH) // todo: deprecate
};
};
const getGlobalConfig = (configPath: string) => {
let globalConfig: ConfigType;
const isGlobalConfigFileExist = existsSync(configPath);
if (!isGlobalConfigFileExist) globalConfig = initGlobalConfig(configPath);
else {
const configFile = readFileSync(configPath, 'utf8');
globalConfig = iniParse(configFile) as ConfigType;
}
return globalConfig;
};
/**
* Merges two configs.
* Env config takes precedence over global ~/.opencommit config file
* @param main - env config
* @param fallback - global ~/.opencommit config file
* @returns merged config
*/
const mergeConfigs = (main: Partial<ConfigType>, fallback: ConfigType) =>
Object.keys(CONFIG_KEYS).reduce((acc, key) => {
acc[key] = parseEnvVarValue(main[key] ?? fallback[key]);
return acc;
}, {} as ConfigType);
interface GetConfigOptions {
globalPath?: string;
envPath?: string;
}
export const getConfig = ({
envPath = defaultEnvPath,
globalPath = defaultConfigPath
}: GetConfigOptions = {}): ConfigType => {
const envConfig = getEnvConfig(envPath);
const globalConfig = getGlobalConfig(globalPath);
const config = mergeConfigs(envConfig, globalConfig);
return config;
};
export const setConfig = (keyValues: [key: string, value: string][], configPath: string = defaultConfigPath) => {
const config = getConfig() || {};
export const setConfig = (
keyValues: [key: string, value: string][],
globalConfigPath: string = defaultConfigPath
) => {
const config = getConfig({
globalPath: globalConfigPath
});
for (const [configKey, configValue] of keyValues) {
if (!configValidators.hasOwnProperty(configKey)) {
throw new Error(`Unsupported config key: ${configKey}`);
for (let [key, value] of keyValues) {
if (!configValidators.hasOwnProperty(key)) {
const supportedKeys = Object.keys(configValidators).join('\n');
throw new Error(
`Unsupported config key: ${key}. Expected keys are:\n\n${supportedKeys}.\n\nFor more help refer to our docs: https://github.com/di-sukharev/opencommit`
);
}
let parsedConfigValue;
try {
parsedConfigValue = JSON.parse(configValue);
parsedConfigValue = JSON.parse(value);
} catch (error) {
parsedConfigValue = configValue;
parsedConfigValue = value;
}
const validValue =
configValidators[configKey as CONFIG_KEYS](parsedConfigValue);
config[configKey as CONFIG_KEYS] = validValue;
const validValue = configValidators[key as CONFIG_KEYS](
parsedConfigValue,
config
);
config[key] = validValue;
}
writeFileSync(configPath, iniStringify(config), 'utf8');
writeFileSync(globalConfigPath, iniStringify(config), 'utf8');
outro(`${chalk.green('✔')} Config successfully set`);
outro(`${chalk.green('✔')} config successfully set`);
};
export const configCommand = command(
@@ -377,9 +577,9 @@ export const configCommand = command(
parameters: ['<mode>', '<key=values...>']
},
async (argv) => {
intro('opencommit — config');
try {
const { mode, keyValues } = argv._;
intro(`COMMAND: config ${mode} ${keyValues}`);
if (mode === CONFIG_MODES.get) {
const config = getConfig() || {};



@@ -1,13 +1,11 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { existsSync } from 'fs';
import fs from 'fs/promises';
import path from 'path';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum.js';
import { assertGitRepo, getCoreHooksPath } from '../utils/git.js';
import { COMMANDS } from './ENUMS';
const HOOK_NAME = 'prepare-commit-msg';
const DEFAULT_SYMLINK_URL = path.join('.git', 'hooks', HOOK_NAME);
@@ -94,7 +92,7 @@ export const hookCommand = command(
}
throw new Error(
`Unsupported mode: ${mode}. Supported modes are: 'set' or 'unset', do: \`oco hook set\``
`Unsupported mode: ${mode}. Supported modes are: 'set' or 'unset'. Run: \`oco hook set\``
);
} catch (error) {
outro(`${chalk.red('✖')} ${error}`);


@@ -39,10 +39,15 @@ export const prepareCommitMessageHook = async (
const config = getConfig();
if (!config?.OCO_OPENAI_API_KEY && !config?.OCO_ANTHROPIC_API_KEY && !config?.OCO_AZURE_API_KEY) {
throw new Error(
'No OPEN_AI_API or OCO_ANTHROPIC_API_KEY or OCO_AZURE_API_KEY exists. Set your key in ~/.opencommit'
if (
!config.OCO_OPENAI_API_KEY &&
!config.OCO_ANTHROPIC_API_KEY &&
!config.OCO_AZURE_API_KEY
) {
outro(
'No OCO_OPENAI_API_KEY or OCO_ANTHROPIC_API_KEY or OCO_AZURE_API_KEY exists. Set your key via `oco config set <key>=<value>, e.g. `oco config set OCO_OPENAI_API_KEY=<value>`. For more info see https://github.com/di-sukharev/opencommit'
);
return;
}
const spin = spinner();


@@ -1,7 +1,28 @@
import { ChatCompletionRequestMessage } from 'openai';
import AnthropicClient from '@anthropic-ai/sdk';
import { OpenAIClient as AzureOpenAIClient } from '@azure/openai';
import { GoogleGenerativeAI as GeminiClient } from '@google/generative-ai';
import { AxiosInstance as RawAxiosClient } from 'axios';
import { OpenAI as OpenAIClient } from 'openai';
export interface AiEngineConfig {
apiKey: string;
model: string;
maxTokensOutput: number;
maxTokensInput: number;
baseURL?: string;
}
type Client =
| OpenAIClient
| AzureOpenAIClient
| AnthropicClient
| RawAxiosClient
| GeminiClient;
export interface AiEngine {
config: AiEngineConfig;
client: Client;
generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined>;
messages: Array<OpenAIClient.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | null | undefined>;
}
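For orientation, a minimal sketch of an engine written against this interface — the EchoEngine below is purely illustrative and does not exist in the codebase:

import { OpenAI as OpenAIClient } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';

// Illustrative engine: echoes the last user message back instead of calling an API.
class EchoEngine implements AiEngine {
  config: AiEngineConfig;
  client: OpenAIClient;

  constructor(config: AiEngineConfig) {
    this.config = config;
    this.client = new OpenAIClient({ apiKey: config.apiKey });
  }

  async generateCommitMessage(
    messages: Array<OpenAIClient.Chat.Completions.ChatCompletionMessageParam>
  ): Promise<string | null | undefined> {
    const last = messages[messages.length - 1];
    return typeof last?.content === 'string' ? last.content : undefined;
  }
}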


@@ -1,106 +1,62 @@
import AnthropicClient from '@anthropic-ai/sdk';
import {
MessageCreateParamsNonStreaming,
MessageParam
} from '@anthropic-ai/sdk/resources/messages.mjs';
import { outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import Anthropic from '@anthropic-ai/sdk';
import {ChatCompletionRequestMessage} from 'openai'
import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources';
import { intro, outro } from '@clack/prompts';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
getConfig
} from '../commands/config';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine';
import { MODEL_LIST } from '../commands/config';
import { AiEngine, AiEngineConfig } from './Engine';
const config = getConfig();
interface AnthropicConfig extends AiEngineConfig {}
const MAX_TOKENS_OUTPUT =
config?.OCO_TOKENS_MAX_OUTPUT ||
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT;
const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
export class AnthropicEngine implements AiEngine {
config: AnthropicConfig;
client: AnthropicClient;
let provider = config?.OCO_AI_PROVIDER;
let apiKey = config?.OCO_ANTHROPIC_API_KEY;
const [command, mode] = process.argv.slice(2);
if (
provider === 'anthropic' &&
!apiKey &&
command !== 'config' &&
mode !== CONFIG_MODES.set
) {
intro('opencommit');
outro(
'OCO_ANTHROPIC_API_KEY is not set, please run `oco config set OCO_ANTHROPIC_API_KEY=<your token> . If you are using Claude, make sure you add payment details, so API works.`'
);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
}
const MODEL = config?.OCO_MODEL;
if (provider === 'anthropic' &&
!MODEL_LIST.anthropic.includes(MODEL) &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
outro(
`${chalk.red('✖')} Unsupported model ${MODEL} for Anthropic. Supported models are: ${MODEL_LIST.anthropic.join(
', '
)}`
);
process.exit(1);
}
class AnthropicAi implements AiEngine {
private anthropicAiApiConfiguration = {
apiKey: apiKey
};
private anthropicAI!: Anthropic;
constructor() {
this.anthropicAI = new Anthropic(this.anthropicAiApiConfiguration);
constructor(config) {
this.config = config;
this.client = new AnthropicClient({ apiKey: this.config.apiKey });
}
public generateCommitMessage = async (
messages: Array<ChatCompletionRequestMessage>
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> => {
const systemMessage = messages.find(msg => msg.role === 'system')?.content as string;
const restMessages = messages.filter((msg) => msg.role !== 'system') as MessageParam[];
const systemMessage = messages.find((msg) => msg.role === 'system')
?.content as string;
const restMessages = messages.filter(
(msg) => msg.role !== 'system'
) as MessageParam[];
const params: MessageCreateParamsNonStreaming = {
model: MODEL,
model: this.config.model,
system: systemMessage,
messages: restMessages,
temperature: 0,
top_p: 0.1,
max_tokens: MAX_TOKENS_OUTPUT
max_tokens: this.config.maxTokensOutput
};
try {
const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0);
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) {
if (
REQUEST_TOKENS >
this.config.maxTokensInput - this.config.maxTokensOutput
) {
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
}
const data = await this.anthropicAI.messages.create(params);
const data = await this.client.messages.create(params);
const message = data?.content[0].text;
return message;
} catch (error) {
outro(`${chalk.red('✖')} ${JSON.stringify(params)}`);
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
@@ -120,5 +76,3 @@ class AnthropicAi implements AiEngine {
}
};
}
export const anthropicAi = new AnthropicAi();


@@ -1,81 +1,51 @@
import {
AzureKeyCredential,
OpenAIClient as AzureOpenAIClient
} from '@azure/openai';
import { outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import { execa } from 'execa';
import {
ChatCompletionRequestMessage,
} from 'openai';
import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
import { intro, outro } from '@clack/prompts';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
getConfig
} from '../commands/config';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine';
import { AiEngine, AiEngineConfig } from './Engine';
const config = getConfig();
const MAX_TOKENS_OUTPUT =
config?.OCO_TOKENS_MAX_OUTPUT ||
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT;
const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
let basePath = config?.OCO_OPENAI_BASE_PATH;
let apiKey = config?.OCO_AZURE_API_KEY;
let apiEndpoint = config?.OCO_AZURE_ENDPOINT;
const [command, mode] = process.argv.slice(2);
const provider = config?.OCO_AI_PROVIDER;
if (
provider === 'azure' &&
!apiKey &&
!apiEndpoint &&
command !== 'config' &&
mode !== CONFIG_MODES.set
) {
intro('opencommit');
outro(
'OCO_AZURE_API_KEY or OCO_AZURE_ENDPOINT are not set, please run `oco config set OCO_AZURE_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.`'
);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
interface AzureAiEngineConfig extends AiEngineConfig {
baseURL: string;
apiKey: string;
}
const MODEL = config?.OCO_MODEL || 'gpt-3.5-turbo';
export class AzureEngine implements AiEngine {
config: AzureAiEngineConfig;
client: AzureOpenAIClient;
class Azure implements AiEngine {
private openAI!: OpenAIClient;
constructor() {
if (provider === 'azure') {
this.openAI = new OpenAIClient(apiEndpoint, new AzureKeyCredential(apiKey));
}
constructor(config: AzureAiEngineConfig) {
this.config = config;
this.client = new AzureOpenAIClient(
this.config.baseURL,
new AzureKeyCredential(this.config.apiKey)
);
}
public generateCommitMessage = async (
messages: Array<ChatCompletionRequestMessage>
generateCommitMessage = async (
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> => {
try {
const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content) + 4)
.map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0);
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) {
if (
REQUEST_TOKENS >
this.config.maxTokensInput - this.config.maxTokensOutput
) {
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
}
const data = await this.openAI.getChatCompletions(MODEL, messages);
const data = await this.client.getChatCompletions(
this.config.model,
messages
);
const message = data.choices[0].message;
@@ -84,10 +54,10 @@ class Azure implements AiEngine {
}
return message?.content;
} catch (error) {
outro(`${chalk.red('✖')} ${MODEL}`);
outro(`${chalk.red('✖')} ${this.config.model}`);
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
outro(`${chalk.red('✖')} ${JSON.stringify(error)}`);
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
@@ -105,5 +75,3 @@ class Azure implements AiEngine {
}
};
}
export const azure = new Azure();

src/engine/flowise.ts Normal file

@@ -0,0 +1,45 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';
interface FlowiseAiConfig extends AiEngineConfig {}
export class FlowiseAi implements AiEngine {
config: FlowiseAiConfig;
client: AxiosInstance;
constructor(config) {
this.config = config;
this.client = axios.create({
url: `${config.baseURL}/${config.apiKey}`,
headers: { 'Content-Type': 'application/json' }
});
}
async generateCommitMessage(
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> {
const gitDiff = (messages[messages.length - 1]?.content as string)
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\n/g, '\\n')
.replace(/\r/g, '\\r')
.replace(/\t/g, '\\t');
const payload = {
question: gitDiff,
overrideConfig: {
systemMessagePrompt: messages[0]?.content
},
history: messages.slice(1, -1)
};
try {
const response = await this.client.post('', payload);
const message = response.data;
return message?.text;
} catch (err: any) {
const message = err.response?.data?.error ?? err.message;
throw new Error('local model issues. details: ' + message);
}
}
}

src/engine/gemini.ts Normal file

@@ -0,0 +1,88 @@
import {
Content,
GoogleGenerativeAI,
HarmBlockThreshold,
HarmCategory,
Part
} from '@google/generative-ai';
import axios from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';
interface GeminiConfig extends AiEngineConfig {}
export class Gemini implements AiEngine {
config: GeminiConfig;
client: GoogleGenerativeAI;
constructor(config) {
this.client = new GoogleGenerativeAI(config.apiKey);
this.config = config;
}
async generateCommitMessage(
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> {
const systemInstruction = messages
.filter((m) => m.role === 'system')
.map((m) => m.content)
.join('\n');
const gemini = this.client.getGenerativeModel({
model: this.config.model,
systemInstruction
});
const contents = messages
.filter((m) => m.role !== 'system')
.map(
(m) =>
({
parts: [{ text: m.content } as Part],
role: m.role === 'user' ? m.role : 'model'
} as Content)
);
try {
const result = await gemini.generateContent({
contents,
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
}
],
generationConfig: {
maxOutputTokens: this.config.maxTokensOutput,
temperature: 0,
topP: 0.1
}
});
return result.response.text();
} catch (error) {
const err = error as Error;
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const geminiError = error.response.data.error;
if (geminiError) throw new Error(geminiError?.message);
}
throw err;
}
}
}


@@ -1,49 +1,44 @@
import axios, { AxiosError } from 'axios';
import { ChatCompletionRequestMessage } from 'openai';
import { AiEngine } from './Engine';
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';
import {
getConfig
} from '../commands/config';
const config = getConfig();
interface OllamaConfig extends AiEngineConfig {}
export class OllamaAi implements AiEngine {
private model = "mistral"; // as default model of Ollama
config: OllamaConfig;
client: AxiosInstance;
setModel(model: string) {
this.model = model ?? config?.OCO_MODEL ?? 'mistral';
constructor(config) {
this.config = config;
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:11434/api/chat',
headers: { 'Content-Type': 'application/json' }
});
}
async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> {
const model = this.model;
//console.log(messages);
//process.exit()
const url = 'http://localhost:11434/api/chat';
const p = {
model,
const params = {
model: this.config.model ?? 'mistral',
messages,
options: { temperature: 0, top_p: 0.1 },
stream: false
};
try {
const response = await axios.post(url, p, {
headers: {
'Content-Type': 'application/json'
}
});
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const message = response.data.message;
return message?.content;
} catch (err: any) {
const message = err.response?.data?.error ?? err.message;
throw new Error('local model issues. details: ' + message);
throw new Error(`Ollama provider error: ${message}`);
}
}
}
export const ollamaAi = new OllamaAi();


@@ -1,129 +1,59 @@
import axios from 'axios';
import chalk from 'chalk';
import { execa } from 'execa';
import {
ChatCompletionRequestMessage,
Configuration as OpenAiApiConfiguration,
OpenAIApi
} from 'openai';
import { intro, outro } from '@clack/prompts';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
getConfig
} from '../commands/config';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine';
import { MODEL_LIST } from '../commands/config';
import { AiEngine, AiEngineConfig } from './Engine';
const config = getConfig();
interface OpenAiConfig extends AiEngineConfig {}
const MAX_TOKENS_OUTPUT =
config?.OCO_TOKENS_MAX_OUTPUT ||
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT;
const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
let basePath = config?.OCO_OPENAI_BASE_PATH;
let apiKey = config?.OCO_OPENAI_API_KEY;
export class OpenAiEngine implements AiEngine {
config: OpenAiConfig;
client: OpenAI;
const [command, mode] = process.argv.slice(2);
const provider = config?.OCO_AI_PROVIDER;
if (
provider === 'openai' &&
!apiKey &&
command !== 'config' &&
mode !== CONFIG_MODES.set
) {
intro('opencommit');
outro(
'OCO_OPENAI_API_KEY is not set, please run `oco config set OCO_OPENAI_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.`'
);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
}
const MODEL = config?.OCO_MODEL || 'gpt-3.5-turbo';
if (provider === 'openai' &&
!MODEL_LIST.openai.includes(MODEL) &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
outro(
`${chalk.red('✖')} Unsupported model ${MODEL} for OpenAI. Supported models are: ${MODEL_LIST.openai.join(
', '
)}`
);
process.exit(1);
}
class OpenAi implements AiEngine {
private openAiApiConfiguration = new OpenAiApiConfiguration({
apiKey: apiKey
});
private openAI!: OpenAIApi;
constructor() {
if (basePath) {
this.openAiApiConfiguration.basePath = basePath;
}
this.openAI = new OpenAIApi(this.openAiApiConfiguration);
constructor(config: OpenAiConfig) {
this.config = config;
this.client = new OpenAI({ apiKey: config.apiKey });
}
public generateCommitMessage = async (
messages: Array<ChatCompletionRequestMessage>
): Promise<string | undefined> => {
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | null> => {
const params = {
model: MODEL,
model: this.config.model,
messages,
temperature: 0,
top_p: 0.1,
max_tokens: MAX_TOKENS_OUTPUT
max_tokens: this.config.maxTokensOutput
};
try {
const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content) + 4)
.map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0);
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) {
if (
REQUEST_TOKENS >
this.config.maxTokensInput - this.config.maxTokensOutput
)
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
}
const { data } = await this.openAI.createChatCompletion(params);
const completion = await this.client.chat.completions.create(params);
const message = data.choices[0].message;
const message = completion.choices[0].message;
return message?.content;
} catch (error) {
outro(`${chalk.red('✖')} ${JSON.stringify(params)}`);
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const openAiError = error.response.data.error;
if (openAiError?.message) outro(openAiError.message);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
if (openAiError) throw new Error(openAiError.message);
}
throw err;
}
};
}
export const api = new OpenAi();


@@ -1,12 +1,47 @@
import { ChatCompletionRequestMessage } from 'openai';
import { OpenAI } from 'openai';
import { AiEngine } from './Engine';
export class TestAi implements AiEngine {
export const TEST_MOCK_TYPES = [
'commit-message',
'prompt-module-commitlint-config'
] as const;
export type TestMockType = (typeof TEST_MOCK_TYPES)[number];
type TestAiEngine = Partial<AiEngine> & {
mockType: TestMockType;
};
export class TestAi implements TestAiEngine {
mockType: TestMockType;
// those are not used in the test engine
config: any;
client: any;
// ---
constructor(mockType: TestMockType) {
this.mockType = mockType;
}
async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage>
_messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> {
return 'test commit message';
switch (this.mockType) {
case 'commit-message':
return 'fix(testAi.ts): test commit message';
case 'prompt-module-commitlint-config':
return (
`{\n` +
` "localLanguage": "english",\n` +
` "commitFix": "fix(server): Change 'port' variable to uppercase 'PORT'",\n` +
` "commitFeat": "feat(server): Allow server to listen on a port specified through environment variable",\n` +
` "commitDescription": "Change 'port' variable to uppercase 'PORT'. Allow server to listen on a port specified through environment variable."\n` +
`}`
);
default:
throw Error('unsupported test mock type');
}
}
}
export const testAi = new TestAi();


@@ -1,31 +1,27 @@
import {
ChatCompletionRequestMessage,
ChatCompletionRequestMessageRoleEnum
} from 'openai';
import { OpenAI } from 'openai';
import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config';
import { getMainCommitPrompt } from './prompts';
import { getEngine } from './utils/engine';
import { mergeDiffs } from './utils/mergeDiffs';
import { tokenCount } from './utils/tokenCount';
import { getEngine } from './utils/engine';
const config = getConfig();
const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
config.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
const MAX_TOKENS_OUTPUT =
config?.OCO_TOKENS_MAX_OUTPUT ||
config.OCO_TOKENS_MAX_OUTPUT ||
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT;
const generateCommitMessageChatCompletionPrompt = async (
diff: string,
fullGitMojiSpec: boolean
): Promise<Array<ChatCompletionRequestMessage>> => {
): Promise<Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>> => {
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec);
const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT];
chatContextAsCompletionRequest.push({
role: ChatCompletionRequestMessageRoleEnum.User,
role: 'user',
content: diff
});
@@ -43,13 +39,13 @@ const ADJUSTMENT_FACTOR = 20;
export const generateCommitMessageByDiff = async (
diff: string,
fullGitMojiSpec: boolean
fullGitMojiSpec: boolean = false
): Promise<string> => {
try {
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec);
const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
(msg) => tokenCount(msg.content) + 4
(msg) => tokenCount(msg.content as string) + 4
).reduce((a, b) => a + b, 0);
const MAX_REQUEST_TOKENS =
@@ -65,9 +61,9 @@ export const generateCommitMessageByDiff = async (
fullGitMojiSpec
);
const commitMessages = [];
const commitMessages = [] as string[];
for (const promise of commitMessagePromises) {
commitMessages.push(await promise);
commitMessages.push((await promise) as string);
await delay(2000);
}
@@ -106,7 +102,7 @@ function getMessagesPromisesByChangesInFile(
maxChangeLength
);
const lineDiffsWithHeader = [];
const lineDiffsWithHeader = [] as string[];
for (const change of mergedChanges) {
const totalChange = fileHeader + change;
if (tokenCount(totalChange) > maxChangeLength) {
@@ -135,7 +131,7 @@ function getMessagesPromisesByChangesInFile(
function splitDiff(diff: string, maxChangeLength: number) {
const lines = diff.split('\n');
const splitDiffs = [];
const splitDiffs = [] as string[];
let currentDiff = '';
if (maxChangeLength <= 0) {
@@ -181,7 +177,7 @@ export const getCommitMsgsPromisesFromFileDiffs = async (
// merge multiple files-diffs into 1 prompt to save tokens
const mergedFilesDiffs = mergeDiffs(diffByFiles, maxDiffLength);
const commitMessagePromises = [];
const commitMessagePromises = [] as Promise<string | null | undefined>[];
for (const fileDiff of mergedFilesDiffs) {
if (tokenCount(fileDiff) >= maxDiffLength) {


@@ -1,11 +1,9 @@
import { unlinkSync, writeFileSync } from 'fs';
import core from '@actions/core';
import exec from '@actions/exec';
import github from '@actions/github';
import { intro, outro } from '@clack/prompts';
import { PushEvent } from '@octokit/webhooks-types';
import { unlinkSync, writeFileSync } from 'fs';
import { generateCommitMessageByDiff } from './generateCommitMessageFromGitDiff';
import { randomIntFromInterval } from './utils/randomIntFromInterval';
import { sleep } from './utils/sleep';
@@ -54,7 +52,7 @@ async function improveMessagesInChunks(diffsAndSHAs: DiffAndSHA[]) {
const chunkSize = diffsAndSHAs!.length % 2 === 0 ? 4 : 3;
outro(`Improving commit messages in chunks of ${chunkSize}.`);
const improvePromises = diffsAndSHAs!.map((commit) =>
generateCommitMessageByDiff(commit.diff)
generateCommitMessageByDiff(commit.diff, false)
);
let improvedMessagesAndSHAs: MsgAndSHA[] = [];


@@ -1,6 +1,6 @@
{
"localLanguage": "简体中文",
"commitFix": "修复(server.ts)将端口变量从小写port改为大写PORT",
"commitFeat": "功能(server.ts)添加对process.env.PORT环境变量的支持",
"commitFix": "fix(server.ts)将端口变量从小写port改为大写PORT",
"commitFeat": "feat(server.ts)添加对process.env.PORT环境变量的支持",
"commitDescription": "现在端口变量被命名为PORT这提高了命名约定的一致性因为PORT是一个常量。环境变量的支持使应用程序更加灵活因为它现在可以通过process.env.PORT环境变量在任何可用端口上运行。"
}


@@ -2,16 +2,16 @@ import { spinner } from '@clack/prompts';
import { getConfig } from '../../commands/config';
import { i18n, I18nLocals } from '../../i18n';
import { getEngine } from '../../utils/engine';
import { COMMITLINT_LLM_CONFIG_PATH } from './constants';
import { computeHash } from './crypto';
import { commitlintPrompts, inferPromptsFromCommitlintConfig } from './prompts';
import { getCommitLintPWDConfig } from './pwd-commitlint';
import { CommitlintLLMConfig } from './types';
import * as utils from './utils';
import { getEngine } from '../../utils/engine';
const config = getConfig();
const translation = i18n[(config?.OCO_LANGUAGE as I18nLocals) || 'en'];
const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
export const configureCommitlintIntegration = async (force = false) => {
const spin = spinner();
@@ -19,7 +19,16 @@ export const configureCommitlintIntegration = async (force = false) => {
const fileExists = await utils.commitlintLLMConfigExists();
let commitLintConfig = await getCommitLintPWDConfig();
const commitLintConfig = await getCommitLintPWDConfig();
if (commitLintConfig === null) {
throw new Error(
`Failed to load @commitlint config. Please check the following:
* @commitlint >= 9.0.0 is installed in the local directory.
* 'node_modules/@commitlint/load' package exists.
* A valid @commitlint configuration exists.
`
);
}
// debug complete @commitlint configuration
// await fs.writeFile(


@@ -1,8 +1,5 @@
import chalk from 'chalk';
import {
ChatCompletionRequestMessage,
ChatCompletionRequestMessageRoleEnum
} from 'openai';
import { OpenAI } from 'openai';
import { outro } from '@clack/prompts';
import {
@@ -17,7 +14,7 @@ import { i18n, I18nLocals } from '../../i18n';
import { IDENTITY, INIT_DIFF_PROMPT } from '../../prompts';
const config = getConfig();
const translation = i18n[(config?.OCO_LANGUAGE as I18nLocals) || 'en'];
const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
type DeepPartial<T> = {
[P in keyof T]?: {
@@ -214,11 +211,10 @@ const STRUCTURE_OF_COMMIT = `
// Prompt to generate LLM-readable rules based on @commitlint rules.
const GEN_COMMITLINT_CONSISTENCY_PROMPT = (
prompts: string[]
): ChatCompletionRequestMessage[] => [
): OpenAI.Chat.Completions.ChatCompletionMessageParam[] => [
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
// prettier-ignore
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages for two different changes in a single codebase and output them in the provided JSON format: one for a bug fix and another for a new feature.
role: 'system',
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages for two different changes in a single codebase and output them in the provided JSON format: one for a bug fix and another for a new feature.
Here are the specific requirements and conventions that should be strictly followed:
@@ -260,22 +256,33 @@ Example Git Diff is to follow:`
const INIT_MAIN_PROMPT = (
language: string,
prompts: string[]
): ChatCompletionRequestMessage => ({
role: ChatCompletionRequestMessageRoleEnum.System,
// prettier-ignore
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages in the given @commitlint convention and explain WHAT were the changes and WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you convert it into a commit message.
${config?.OCO_EMOJI ? 'Use GitMoji convention to preface the commit.' : 'Do not preface the commit with anything.'}
${config?.OCO_DESCRIPTION ? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.' : "Don't add any descriptions to the commit, only commit message."}
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: 'system',
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages in the given @commitlint convention and explain WHAT were the changes ${
config.OCO_WHY ? 'and WHY the changes were done' : ''
}. I'll send you an output of 'git diff --staged' command, and you convert it into a commit message.
${
config.OCO_EMOJI
? 'Use GitMoji convention to preface the commit.'
: 'Do not preface the commit with anything.'
}
${
config.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message."
}
Use the present tense. Use ${language} to answer.
${ config?.OCO_ONE_LINE_COMMIT ? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.' : ""}
${
config.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: ''
}
You will strictly follow the following conventions to generate the content of the commit message:
- ${prompts.join('\n- ')}
The conventions refers to the following structure of commit message:
${STRUCTURE_OF_COMMIT}
`
${STRUCTURE_OF_COMMIT}`
});
export const commitlintPrompts = {


@@ -1,11 +1,41 @@
import fs from 'fs/promises';
import path from 'path';
const nodeModulesPath = path.join(
process.env.PWD || process.cwd(),
'node_modules',
'@commitlint',
'load'
);
const findModulePath = (moduleName: string) => {
const searchPaths = [
path.join('node_modules', moduleName),
path.join('node_modules', '.pnpm')
];
for (const basePath of searchPaths) {
try {
const resolvedPath = require.resolve(moduleName, { paths: [basePath] });
return resolvedPath;
} catch {
// Continue to the next search path if the module is not found
}
}
throw new Error(`Cannot find module ${moduleName}`);
};
const getCommitLintModuleType = async (): Promise<'cjs' | 'esm'> => {
const packageFile = '@commitlint/load/package.json';
const packageJsonPath = findModulePath(packageFile);
const packageJson = JSON.parse(await fs.readFile(packageJsonPath, 'utf8'));
if (!packageJson) {
throw new Error(`Failed to parse ${packageFile}`);
}
return packageJson.type === 'module' ? 'esm' : 'cjs';
};
/**
* QualifiedConfig from any version of @commitlint/types
* @see https://github.com/conventional-changelog/commitlint/blob/master/@commitlint/types/src/load.ts
*/
type QualifiedConfigOnAnyVersion = { [key: string]: unknown };
/**
* This code is loading the configuration for the `@commitlint` package from the current working
@@ -13,13 +43,31 @@ const nodeModulesPath = path.join(
*
* @returns
*/
export const getCommitLintPWDConfig = async () => {
const load = require(nodeModulesPath).default;
export const getCommitLintPWDConfig =
async (): Promise<QualifiedConfigOnAnyVersion | null> => {
let load: Function, modulePath: string;
switch (await getCommitLintModuleType()) {
case 'cjs':
/**
* CommonJS (<= commitlint@v18.x.x.)
*/
modulePath = findModulePath('@commitlint/load');
load = require(modulePath).default;
break;
case 'esm':
/**
* ES Module (commitlint@v19.x.x. <= )
* Directory import is not supported in ES Module resolution, so import the file directly
*/
modulePath = await findModulePath('@commitlint/load/lib/load.js');
load = (await import(modulePath)).default;
break;
}
if (load && typeof load === 'function') {
return await load();
}
if (load && typeof load === 'function') {
return await load();
}
// @commitlint/load is not a function
return null;
};
// @commitlint/load is not a function
return null;
};


@@ -20,8 +20,8 @@ export const getJSONBlock = (input: string): string => {
const jsonIndex = input.search('```json');
if (jsonIndex > -1) {
input = input.slice(jsonIndex + 8);
const endJsonIndex = consistency.search('```');
input = input.slice(0, endJsonIndex);
const endJsonIndex = input.search('```');
input = input.slice(0, endJsonIndex);
}
return input;
};
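A small usage sketch of getJSONBlock after this fix (the LLM answer string is made up for illustration):

const llmAnswer = [
  'Here is the generated config:',
  '```json',
  '{ "localLanguage": "english" }',
  '```',
  'Anything else?'
].join('\n');

// With the closing fence now searched on `input` (the old code referenced
// `consistency`), only the payload between the ```json fences is returned.
getJSONBlock(llmAnswer); // '{ "localLanguage": "english" }\n'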


@@ -1,10 +1,5 @@
import {
ChatCompletionRequestMessage,
ChatCompletionRequestMessageRoleEnum
} from 'openai';
import { note } from '@clack/prompts';
import { OpenAI } from 'openai';
import { getConfig } from './commands/config';
import { i18n, I18nLocals } from './i18n';
import { configureCommitlintIntegration } from './modules/commitlint/config';
@@ -14,118 +9,133 @@ import * as utils from './modules/commitlint/utils';
import { removeConventionalCommitWord } from './utils/removeConventionalCommitWord';
const config = getConfig();
const translation = i18n[(config?.OCO_LANGUAGE as I18nLocals) || 'en'];
const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
export const IDENTITY =
'You are to act as the author of a commit message in git.';
'You are to act as an author of a commit message in git.';
const GITMOJI_HELP = `Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description):
🐛, Fix a bug;
✨, Introduce new features;
📝, Add or update documentation;
🚀, Deploy stuff;
✅, Add, update, or pass tests;
♻️, Refactor code;
⬆️, Upgrade dependencies;
🔧, Add or update configuration files;
🌐, Internationalization and localization;
💡, Add or update comments in source code;`;
const FULL_GITMOJI_SPEC = `${GITMOJI_HELP}
🎨, Improve structure / format of the code;
⚡️, Improve performance;
🔥, Remove code or files;
🚑️, Critical hotfix;
💄, Add or update the UI and style files;
🎉, Begin a project;
🔒️, Fix security issues;
🔐, Add or update secrets;
🔖, Release / Version tags;
🚨, Fix compiler / linter warnings;
🚧, Work in progress;
💚, Fix CI Build;
⬇️, Downgrade dependencies;
📌, Pin dependencies to specific versions;
👷, Add or update CI build system;
📈, Add or update analytics or track code;
➕, Add a dependency;
➖, Remove a dependency;
🔨, Add or update development scripts;
✏️, Fix typos;
💩, Write bad code that needs to be improved;
⏪️, Revert changes;
🔀, Merge branches;
📦️, Add or update compiled files or packages;
👽️, Update code due to external API changes;
🚚, Move or rename resources (e.g.: files, paths, routes);
📄, Add or update license;
💥, Introduce breaking changes;
🍱, Add or update assets;
♿️, Improve accessibility;
🍻, Write code drunkenly;
💬, Add or update text and literals;
🗃️, Perform database related changes;
🔊, Add or update logs;
🔇, Remove logs;
👥, Add or update contributor(s);
🚸, Improve user experience / usability;
🏗️, Make architectural changes;
📱, Work on responsive design;
🤡, Mock things;
🥚, Add or update an easter egg;
🙈, Add or update a .gitignore file;
📸, Add or update snapshots;
⚗️, Perform experiments;
🔍️, Improve SEO;
🏷️, Add or update types;
🌱, Add or update seed files;
🚩, Add, update, or remove feature flags;
🥅, Catch errors;
💫, Add or update animations and transitions;
🗑️, Deprecate code that needs to be cleaned up;
🛂, Work on code related to authorization, roles and permissions;
🩹, Simple fix for a non-critical issue;
🧐, Data exploration/inspection;
⚰️, Remove dead code;
🧪, Add a failing test;
👔, Add or update business logic;
🩺, Add or update healthcheck;
🧱, Infrastructure related changes;
🧑‍💻, Improve developer experience;
💸, Add sponsorships or money related infrastructure;
🧵, Add or update code related to multithreading or concurrency;
🦺, Add or update code related to validation.`;
const CONVENTIONAL_COMMIT_KEYWORDS =
'Do not preface the commit with anything, except for the conventional commit keywords: fix, feat, build, chore, ci, docs, style, refactor, perf, test.';
const getCommitConvention = (fullGitMojiSpec: boolean) =>
config.OCO_EMOJI
? fullGitMojiSpec
? FULL_GITMOJI_SPEC
: GITMOJI_HELP
: CONVENTIONAL_COMMIT_KEYWORDS;
const getDescriptionInstruction = () =>
config.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message.";
const getOneLineCommitInstruction = () =>
config.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: '';
const INIT_MAIN_PROMPT = (
language: string,
fullGitMojiSpec: boolean
): ChatCompletionRequestMessage => ({
role: ChatCompletionRequestMessageRoleEnum.System,
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${
fullGitMojiSpec ? 'GitMoji specification' : 'conventional commit convention'
} and explain WHAT were the changes and mainly WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message.
${
config?.OCO_EMOJI
? 'Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description): ' +
'🐛, Fix a bug; ' +
'✨, Introduce new features; ' +
'📝, Add or update documentation; ' +
'🚀, Deploy stuff; ' +
'✅, Add, update, or pass tests; ' +
'♻️, Refactor code; ' +
'⬆️, Upgrade dependencies; ' +
'🔧, Add or update configuration files; ' +
'🌐, Internationalization and localization; ' +
'💡, Add or update comments in source code; ' +
`${
fullGitMojiSpec
? '🎨, Improve structure / format of the code; ' +
'⚡️, Improve performance; ' +
'🔥, Remove code or files; ' +
'🚑️, Critical hotfix; ' +
'💄, Add or update the UI and style files; ' +
'🎉, Begin a project; ' +
'🔒️, Fix security issues; ' +
'🔐, Add or update secrets; ' +
'🔖, Release / Version tags; ' +
'🚨, Fix compiler / linter warnings; ' +
'🚧, Work in progress; ' +
'💚, Fix CI Build; ' +
'⬇️, Downgrade dependencies; ' +
'📌, Pin dependencies to specific versions; ' +
'👷, Add or update CI build system; ' +
'📈, Add or update analytics or track code; ' +
'➕, Add a dependency; ' +
'➖, Remove a dependency; ' +
'🔨, Add or update development scripts; ' +
'✏️, Fix typos; ' +
'💩, Write bad code that needs to be improved; ' +
'⏪️, Revert changes; ' +
'🔀, Merge branches; ' +
'📦️, Add or update compiled files or packages; ' +
'👽️, Update code due to external API changes; ' +
'🚚, Move or rename resources (e.g.: files, paths, routes); ' +
'📄, Add or update license; ' +
'💥, Introduce breaking changes; ' +
'🍱, Add or update assets; ' +
'♿️, Improve accessibility; ' +
'🍻, Write code drunkenly; ' +
'💬, Add or update text and literals; ' +
'🗃️, Perform database related changes; ' +
'🔊, Add or update logs; ' +
'🔇, Remove logs; ' +
'👥, Add or update contributor(s); ' +
'🚸, Improve user experience / usability; ' +
'🏗️, Make architectural changes; ' +
'📱, Work on responsive design; ' +
'🤡, Mock things; ' +
'🥚, Add or update an easter egg; ' +
'🙈, Add or update a .gitignore file; ' +
'📸, Add or update snapshots; ' +
'⚗️, Perform experiments; ' +
'🔍️, Improve SEO; ' +
'🏷️, Add or update types; ' +
'🌱, Add or update seed files; ' +
'🚩, Add, update, or remove feature flags; ' +
'🥅, Catch errors; ' +
'💫, Add or update animations and transitions; ' +
'🗑️, Deprecate code that needs to be cleaned up; ' +
'🛂, Work on code related to authorization, roles and permissions; ' +
'🩹, Simple fix for a non-critical issue; ' +
'🧐, Data exploration/inspection; ' +
'⚰️, Remove dead code; ' +
'🧪, Add a failing test; ' +
'👔, Add or update business logic; ' +
'🩺, Add or update healthcheck; ' +
'🧱, Infrastructure related changes; ' +
'🧑‍💻, Improve developer experience; ' +
'💸, Add sponsorships or money related infrastructure; ' +
'🧵, Add or update code related to multithreading or concurrency; ' +
'🦺, Add or update code related to validation.'
: ''
}`
: 'Do not preface the commit with anything. Conventional commit keywords:' +
'fix, feat, build, chore, ci, docs, style, refactor, perf, test.'
}
${
config?.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message."
}
${
config?.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: ''
}
Use the present tense. Lines must not be longer than 74 characters. Use ${language} for the commit message.`
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: 'system',
content: (() => {
const commitConvention = fullGitMojiSpec
? 'GitMoji specification'
: 'Conventional Commit Convention';
const missionStatement = `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${commitConvention} and explain WHAT were the changes and mainly WHY the changes were done.`;
const diffInstruction =
"I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message.";
const conventionGuidelines = getCommitConvention(fullGitMojiSpec);
const descriptionGuideline = getDescriptionInstruction();
const oneLineCommitGuideline = getOneLineCommitInstruction();
const generalGuidelines = `Use the present tense. Lines must not be longer than 74 characters. Use ${language} for the commit message.`;
return `${missionStatement}\n${diffInstruction}\n${conventionGuidelines}\n${descriptionGuideline}\n${oneLineCommitGuideline}\n${generalGuidelines}`;
})()
});
export const INIT_DIFF_PROMPT: ChatCompletionRequestMessage = {
role: ChatCompletionRequestMessageRoleEnum.User,
content: `diff --git a/src/server.ts b/src/server.ts
export const INIT_DIFF_PROMPT: OpenAI.Chat.Completions.ChatCompletionMessageParam =
{
role: 'user',
content: `diff --git a/src/server.ts b/src/server.ts
index ad4db42..f3b18a9 100644
--- a/src/server.ts
+++ b/src/server.ts
@@ -149,29 +159,35 @@ export const INIT_DIFF_PROMPT: ChatCompletionRequestMessage = {
+app.listen(process.env.PORT || PORT, () => {
+ console.log(\`Server listening on port \${PORT}\`);
});`
};
const getContent = (translation: ConsistencyPrompt) => {
const fix = config.OCO_EMOJI
? `🐛 ${removeConventionalCommitWord(translation.commitFix)}`
: translation.commitFix;
const feat = config.OCO_EMOJI
? `${removeConventionalCommitWord(translation.commitFeat)}`
: translation.commitFeat;
const description = config.OCO_DESCRIPTION
? translation.commitDescription
: '';
return `${fix}\n${feat}\n${description}`;
};
const INIT_CONSISTENCY_PROMPT = (
translation: ConsistencyPrompt
): ChatCompletionRequestMessage => ({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: `${
config?.OCO_EMOJI
? `🐛 ${removeConventionalCommitWord(translation.commitFix)}`
: translation.commitFix
}
${
config?.OCO_EMOJI
? `${removeConventionalCommitWord(translation.commitFeat)}`
: translation.commitFeat
}
${config?.OCO_DESCRIPTION ? translation.commitDescription : ''}`
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: 'assistant',
content: getContent(translation)
});
export const getMainCommitPrompt = async (
fullGitMojiSpec: boolean
): Promise<ChatCompletionRequestMessage[]> => {
switch (config?.OCO_PROMPT_MODULE) {
): Promise<Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>> => {
switch (config.OCO_PROMPT_MODULE) {
case '@commitlint':
if (!(await utils.commitlintLLMConfigExists())) {
note(
@@ -197,7 +213,6 @@ export const getMainCommitPrompt = async (
];
default:
// conventional-commit
return [
INIT_MAIN_PROMPT(translation.localLanguage, fullGitMojiSpec),
INIT_DIFF_PROMPT,


@@ -1,26 +1,65 @@
import { getConfig, OCO_AI_PROVIDER_ENUM } from '../commands/config';
import { AnthropicEngine } from '../engine/anthropic';
import { AzureEngine } from '../engine/azure';
import { AiEngine } from '../engine/Engine';
import { api } from '../engine/openAi';
import { getConfig } from '../commands/config';
import { ollamaAi } from '../engine/ollama';
import { azure } from '../engine/azure';
import { anthropicAi } from '../engine/anthropic'
import { testAi } from '../engine/testAi';
import { FlowiseAi } from '../engine/flowise';
import { Gemini } from '../engine/gemini';
import { OllamaAi } from '../engine/ollama';
import { OpenAiEngine } from '../engine/openAi';
import { TestAi, TestMockType } from '../engine/testAi';
export function getEngine(): AiEngine {
const config = getConfig();
const provider = config?.OCO_AI_PROVIDER;
if (provider?.startsWith('ollama')) {
const model = provider.split('/')[1];
if (model) ollamaAi.setModel(model);
return ollamaAi;
} else if (config?.OCO_AI_PROVIDER == 'anthropic') {
return anthropicAi;
} else if (config?.OCO_AI_PROVIDER == 'test') {
return testAi;
} else if (config?.OCO_AI_PROVIDER == 'azure') {
return azure;
const provider = config.OCO_AI_PROVIDER;
const DEFAULT_CONFIG = {
model: config.OCO_MODEL!,
maxTokensOutput: config.OCO_TOKENS_MAX_OUTPUT!,
maxTokensInput: config.OCO_TOKENS_MAX_INPUT!,
baseURL: config.OCO_OPENAI_BASE_PATH!
};
switch (provider) {
case OCO_AI_PROVIDER_ENUM.OLLAMA:
return new OllamaAi({
...DEFAULT_CONFIG,
apiKey: '',
baseURL: config.OCO_OLLAMA_API_URL!
});
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
return new AnthropicEngine({
...DEFAULT_CONFIG,
apiKey: config.OCO_ANTHROPIC_API_KEY!
});
case OCO_AI_PROVIDER_ENUM.TEST:
return new TestAi(config.OCO_TEST_MOCK_TYPE as TestMockType);
case OCO_AI_PROVIDER_ENUM.GEMINI:
return new Gemini({
...DEFAULT_CONFIG,
apiKey: config.OCO_GEMINI_API_KEY!,
baseURL: config.OCO_GEMINI_BASE_PATH!
});
case OCO_AI_PROVIDER_ENUM.AZURE:
return new AzureEngine({
...DEFAULT_CONFIG,
apiKey: config.OCO_AZURE_API_KEY!
});
case OCO_AI_PROVIDER_ENUM.FLOWISE:
return new FlowiseAi({
...DEFAULT_CONFIG,
baseURL: config.OCO_FLOWISE_ENDPOINT || DEFAULT_CONFIG.baseURL,
apiKey: config.OCO_FLOWISE_API_KEY!
});
default:
return new OpenAiEngine({
...DEFAULT_CONFIG,
apiKey: config.OCO_OPENAI_API_KEY!
});
}
// open ai gpt by default
return api;
}
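For context, a minimal sketch of how the refactored factory is consumed — the message list below is illustrative; real prompts come from getMainCommitPrompt:

import { getEngine } from './utils/engine';

const engine = getEngine(); // provider-specific engine, built from the merged config

const commitMessage = await engine.generateCommitMessage([
  { role: 'system', content: 'You write conventional commit messages.' },
  { role: 'user', content: 'diff --git a/index.ts b/index.ts ...' }
]);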


@@ -5,7 +5,6 @@ import { prepareEnvironment } from './utils';
it('cli flow when there are no changes', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
const { findByText } = await render(`OCO_AI_PROVIDER='test' node`, [resolve('./out/cli.cjs')], { cwd: gitDir });
expect(await findByText('No changes detected')).toBeInTheConsole();


@@ -10,7 +10,6 @@ it('cli flow to generate commit message for 1 new file (staged)', async () => {
await render('git' ,['add index.ts'], { cwd: gitDir });
const { queryByText, findByText, userEvent } = await render(`OCO_AI_PROVIDER='test' node`, [resolve('./out/cli.cjs')], { cwd: gitDir });
expect(await queryByText('No files are staged')).not.toBeInTheConsole();
expect(await queryByText('Do you want to stage all files and generate commit message?')).not.toBeInTheConsole();


@@ -0,0 +1,222 @@
import { resolve } from 'path';
import { render } from 'cli-testing-library';
import 'cli-testing-library/extend-expect';
import { prepareEnvironment, wait } from '../utils';
import path from 'path';
function getAbsolutePath(relativePath: string) {
const scriptDir = path.dirname(__filename);
return path.resolve(scriptDir, relativePath);
}
async function setupCommitlint(dir: string, ver: 9 | 18 | 19) {
let packagePath, packageJsonPath, configPath;
switch (ver) {
case 9:
packagePath = getAbsolutePath('./data/commitlint_9/node_modules');
packageJsonPath = getAbsolutePath('./data/commitlint_9/package.json');
configPath = getAbsolutePath('./data/commitlint_9/commitlint.config.js');
break;
case 18:
packagePath = getAbsolutePath('./data/commitlint_18/node_modules');
packageJsonPath = getAbsolutePath('./data/commitlint_18/package.json');
configPath = getAbsolutePath('./data/commitlint_18/commitlint.config.js');
break;
case 19:
packagePath = getAbsolutePath('./data/commitlint_19/node_modules');
packageJsonPath = getAbsolutePath('./data/commitlint_19/package.json');
configPath = getAbsolutePath('./data/commitlint_19/commitlint.config.js');
break;
}
await render('cp', ['-r', packagePath, '.'], { cwd: dir });
await render('cp', [packageJsonPath, '.'], { cwd: dir });
await render('cp', [configPath, '.'], { cwd: dir });
await wait(3000); // Avoid flakiness by waiting
}
describe('cli flow to run "oco commitlint force"', () => {
it('on commitlint@9 using CJS', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
await setupCommitlint(gitDir, 9);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@9')).toBeInTheConsole();
const { findByText } = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await findByText('opencommit — configure @commitlint')
).toBeInTheConsole();
expect(
await findByText('Read @commitlint configuration')
).toBeInTheConsole();
expect(
await findByText('Generating consistency with given @commitlint rules')
).toBeInTheConsole();
expect(
await findByText('Done - please review contents of')
).toBeInTheConsole();
await cleanup();
});
it('on commitlint@18 using CJS', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
await setupCommitlint(gitDir, 18);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@18')).toBeInTheConsole();
const { findByText } = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await findByText('opencommit — configure @commitlint')
).toBeInTheConsole();
expect(
await findByText('Read @commitlint configuration')
).toBeInTheConsole();
expect(
await findByText('Generating consistency with given @commitlint rules')
).toBeInTheConsole();
expect(
await findByText('Done - please review contents of')
).toBeInTheConsole();
await cleanup();
});
it('on commitlint@19 using ESM', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
await setupCommitlint(gitDir, 19);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@19')).toBeInTheConsole();
const { findByText } = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await findByText('opencommit — configure @commitlint')
).toBeInTheConsole();
expect(
await findByText('Read @commitlint configuration')
).toBeInTheConsole();
expect(
await findByText('Generating consistency with given @commitlint rules')
).toBeInTheConsole();
expect(
await findByText('Done - please review contents of')
).toBeInTheConsole();
await cleanup();
});
});
describe('cli flow to generate commit message using @commitlint prompt-module', () => {
it('on commitlint@19 using ESM', async () => {
const { gitDir, cleanup } = await prepareEnvironment();
// Setup commitlint@19
await setupCommitlint(gitDir, 19);
const npmList = await render('npm', ['list', '@commitlint/load'], {
cwd: gitDir
});
expect(await npmList.findByText('@commitlint/load@19')).toBeInTheConsole();
// Run `oco commitlint force`
const commitlintForce = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint force \
`,
[],
{ cwd: gitDir }
);
expect(
await commitlintForce.findByText('Done - please review contents of')
).toBeInTheConsole();
// Run `oco commitlint get`
const commitlintGet = await render(
`
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} commitlint get \
`,
[],
{ cwd: gitDir }
);
expect(await commitlintGet.findByText('consistency')).toBeInTheConsole();
// Run 'oco' using .opencommit-commitlint
await render('echo', [`'console.log("Hello World");' > index.ts`], {
cwd: gitDir
});
await render('git', ['add index.ts'], { cwd: gitDir });
const oco = await render(
`
OCO_TEST_MOCK_TYPE='commit-message' \
OCO_PROMPT_MODULE='@commitlint' \
OCO_AI_PROVIDER='test' \
node ${resolve('./out/cli.cjs')} \
`,
[],
{ cwd: gitDir }
);
expect(
await oco.findByText('Generating the commit message')
).toBeInTheConsole();
expect(
await oco.findByText('Confirm the commit message?')
).toBeInTheConsole();
oco.userEvent.keyboard('[Enter]');
expect(
await oco.findByText('Choose a remote to push to')
).toBeInTheConsole();
oco.userEvent.keyboard('[Enter]');
expect(
await oco.findByText('Successfully pushed all commits to origin')
).toBeInTheConsole();
await cleanup();
});
});


@@ -0,0 +1,3 @@
module.exports = {
extends: ['@commitlint/config-conventional']
};

File diff suppressed because it is too large


@@ -0,0 +1,15 @@
{
"name": "commitlint-test",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"devDependencies": {
"@commitlint/cli": "^18.0.0",
"@commitlint/config-conventional": "^18.0.0"
}
}


@@ -0,0 +1,3 @@
export default {
extends: ['@commitlint/config-conventional']
};

File diff suppressed because it is too large


@@ -0,0 +1,16 @@
{
"name": "commitlint-test",
"version": "1.0.0",
"description": "",
"main": "index.js",
"type": "module",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"devDependencies": {
"@commitlint/cli": "^19.0.0",
"@commitlint/config-conventional": "^19.0.0"
}
}


@@ -0,0 +1,3 @@
module.exports = {
extends: ['@commitlint/config-conventional']
};

File diff suppressed because it is too large


@@ -0,0 +1,15 @@
{
"name": "commitlint-test",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"devDependencies": {
"@commitlint/cli": "^9.0.0",
"@commitlint/config-conventional": "^9.0.0"
}
}

test/e2e/setup.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/sh
current_dir=$(pwd)
setup_dir="$(cd "$(dirname "$0")" && pwd)"
# Set up for prompt-module/commitlint
cd "$setup_dir" && cd prompt-module/data/commitlint_9 && npm ci
cd "$setup_dir" && cd prompt-module/data/commitlint_18 && npm ci
cd "$setup_dir" && cd prompt-module/data/commitlint_19 && npm ci
cd "$current_dir"


@@ -29,3 +29,5 @@ export const prepareEnvironment = async (): Promise<{
cleanup,
}
}
export const wait = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));


@@ -1,5 +1,8 @@
import 'cli-testing-library/extend-expect'
import { configure } from 'cli-testing-library'
import { jest } from '@jest/globals';
global.jest = jest;
/**
* Adjusted the wait time for waitFor/findByText to 2000ms, because the default 1000ms makes the test results flaky

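The jest-setup.ts hunk above is cut off before the configure call that the comment refers to. Below is a minimal sketch of how the 2000ms wait time would likely be applied, assuming cli-testing-library's configure accepts the testing-library-style asyncUtilTimeout option; this is a hypothetical completion, not the committed code.

// Hypothetical completion of test/jest-setup.ts: assumes configure() from
// cli-testing-library accepts the testing-library-style asyncUtilTimeout option.
import 'cli-testing-library/extend-expect';
import { configure } from 'cli-testing-library';
import { jest } from '@jest/globals';

global.jest = jest;

// Allow findByText/waitFor up to 2000ms instead of the default 1000ms,
// which the comment above notes made the test results flaky.
configure({ asyncUtilTimeout: 2000 });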

@@ -1,8 +1,17 @@
import { getConfig } from '../../src/commands/config';
import { existsSync, readFileSync, rmSync } from 'fs';
import {
DEFAULT_CONFIG,
getConfig,
setConfig
} from '../../src/commands/config';
import { prepareFile } from './utils';
import { dirname } from 'path';
describe('getConfig', () => {
describe('config', () => {
const originalEnv = { ...process.env };
let globalConfigFile: { filePath: string; cleanup: () => Promise<void> };
let envConfigFile: { filePath: string; cleanup: () => Promise<void> };
function resetEnv(env: NodeJS.ProcessEnv) {
Object.keys(process.env).forEach((key) => {
if (!(key in env)) {
@@ -13,93 +22,274 @@ describe('getConfig', () => {
});
}
beforeEach(() => {
beforeEach(async () => {
resetEnv(originalEnv);
if (globalConfigFile) await globalConfigFile.cleanup();
if (envConfigFile) await envConfigFile.cleanup();
});
afterEach(async () => {
if (globalConfigFile) await globalConfigFile.cleanup();
if (envConfigFile) await envConfigFile.cleanup();
});
afterAll(() => {
resetEnv(originalEnv);
});
it('return config values from the global config file', async () => {
const configFile = await prepareFile(
'.opencommit',
`
OCO_OPENAI_API_KEY="sk-key"
OCO_ANTHROPIC_API_KEY="secret-key"
OCO_TOKENS_MAX_INPUT="8192"
OCO_TOKENS_MAX_OUTPUT="1000"
OCO_OPENAI_BASE_PATH="/openai/api"
OCO_DESCRIPTION="true"
OCO_EMOJI="true"
OCO_MODEL="gpt-4"
OCO_LANGUAGE="de"
OCO_MESSAGE_TEMPLATE_PLACEHOLDER="$m"
OCO_PROMPT_MODULE="@commitlint"
OCO_AI_PROVIDER="ollama"
OCO_GITPUSH="false"
OCO_ONE_LINE_COMMIT="true"
`
);
const config = getConfig({ configPath: configFile.filePath, envPath: '' });
const generateConfig = async (
fileName: string,
content: Record<string, string>
) => {
const fileContent = Object.entries(content)
.map(([key, value]) => `${key}="${value}"`)
.join('\n');
return await prepareFile(fileName, fileContent);
};
expect(config).not.toEqual(null);
expect(config!['OCO_OPENAI_API_KEY']).toEqual('sk-key');
expect(config!['OCO_ANTHROPIC_API_KEY']).toEqual('secret-key');
expect(config!['OCO_TOKENS_MAX_INPUT']).toEqual(8192);
expect(config!['OCO_TOKENS_MAX_OUTPUT']).toEqual(1000);
expect(config!['OCO_OPENAI_BASE_PATH']).toEqual('/openai/api');
expect(config!['OCO_DESCRIPTION']).toEqual(true);
expect(config!['OCO_EMOJI']).toEqual(true);
expect(config!['OCO_MODEL']).toEqual('gpt-4');
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(config!['OCO_AI_PROVIDER']).toEqual('ollama');
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);
describe('getConfig', () => {
it('should prioritize local .env over global .opencommit config', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_OPENAI_API_KEY: 'global-key',
OCO_MODEL: 'gpt-3.5-turbo',
OCO_LANGUAGE: 'en'
});
await configFile.cleanup();
envConfigFile = await generateConfig('.env', {
OCO_OPENAI_API_KEY: 'local-key',
OCO_ANTHROPIC_API_KEY: 'local-anthropic-key',
OCO_LANGUAGE: 'fr'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_OPENAI_API_KEY).toEqual('local-key');
expect(config.OCO_MODEL).toEqual('gpt-3.5-turbo');
expect(config.OCO_LANGUAGE).toEqual('fr');
expect(config.OCO_ANTHROPIC_API_KEY).toEqual('local-anthropic-key');
});
it('should fallback to global config when local config is not set', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_OPENAI_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'de',
OCO_DESCRIPTION: 'true'
});
envConfigFile = await generateConfig('.env', {
OCO_ANTHROPIC_API_KEY: 'local-anthropic-key'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_OPENAI_API_KEY).toEqual('global-key');
expect(config.OCO_ANTHROPIC_API_KEY).toEqual('local-anthropic-key');
expect(config.OCO_MODEL).toEqual('gpt-4');
expect(config.OCO_LANGUAGE).toEqual('de');
expect(config.OCO_DESCRIPTION).toEqual(true);
});
it('should handle boolean and numeric values correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_TOKENS_MAX_INPUT: '4096',
OCO_TOKENS_MAX_OUTPUT: '500',
OCO_GITPUSH: 'true'
});
envConfigFile = await generateConfig('.env', {
OCO_TOKENS_MAX_INPUT: '8192',
OCO_ONE_LINE_COMMIT: 'false'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_TOKENS_MAX_INPUT).toEqual(8192);
expect(config.OCO_TOKENS_MAX_OUTPUT).toEqual(500);
expect(config.OCO_GITPUSH).toEqual(true);
expect(config.OCO_ONE_LINE_COMMIT).toEqual(false);
});
it('should handle empty local config correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_OPENAI_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'es'
});
envConfigFile = await generateConfig('.env', {});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_OPENAI_API_KEY).toEqual('global-key');
expect(config.OCO_MODEL).toEqual('gpt-4');
expect(config.OCO_LANGUAGE).toEqual('es');
});
it('should override global config with null values in local .env', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_OPENAI_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'es'
});
envConfigFile = await generateConfig('.env', {
OCO_OPENAI_API_KEY: 'null'
});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_OPENAI_API_KEY).toEqual(null);
});
it('should handle empty global config', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
envConfigFile = await generateConfig('.env', {});
const config = getConfig({
globalPath: globalConfigFile.filePath,
envPath: envConfigFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_OPENAI_API_KEY).toEqual(undefined);
});
});
it('return config values from the local env file', async () => {
const envFile = await prepareFile(
'.env',
`
OCO_OPENAI_API_KEY="sk-key"
OCO_ANTHROPIC_API_KEY="secret-key"
OCO_TOKENS_MAX_INPUT="8192"
OCO_TOKENS_MAX_OUTPUT="1000"
OCO_OPENAI_BASE_PATH="/openai/api"
OCO_DESCRIPTION="true"
OCO_EMOJI="true"
OCO_MODEL="gpt-4"
OCO_LANGUAGE="de"
OCO_MESSAGE_TEMPLATE_PLACEHOLDER="$m"
OCO_PROMPT_MODULE="@commitlint"
OCO_AI_PROVIDER="ollama"
OCO_GITPUSH="false"
OCO_ONE_LINE_COMMIT="true"
`
);
const config = getConfig({ configPath: '', envPath: envFile.filePath });
describe('setConfig', () => {
beforeEach(async () => {
// Create and then delete the file so that the parent directory exists but the file itself does not, in order to test that setConfig creates the file
globalConfigFile = await generateConfig('.opencommit', {});
rmSync(globalConfigFile.filePath);
});
expect(config).not.toEqual(null);
expect(config!['OCO_OPENAI_API_KEY']).toEqual('sk-key');
expect(config!['OCO_ANTHROPIC_API_KEY']).toEqual('secret-key');
expect(config!['OCO_TOKENS_MAX_INPUT']).toEqual(8192);
expect(config!['OCO_TOKENS_MAX_OUTPUT']).toEqual(1000);
expect(config!['OCO_OPENAI_BASE_PATH']).toEqual('/openai/api');
expect(config!['OCO_DESCRIPTION']).toEqual(true);
expect(config!['OCO_EMOJI']).toEqual(true);
expect(config!['OCO_MODEL']).toEqual('gpt-4');
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(config!['OCO_AI_PROVIDER']).toEqual('ollama');
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);
it('should create .opencommit file with DEFAULT CONFIG if it does not exist on first setConfig run', async () => {
const isGlobalConfigFileExist = existsSync(globalConfigFile.filePath);
expect(isGlobalConfigFileExist).toBe(false);
await envFile.cleanup();
await setConfig(
[['OCO_OPENAI_API_KEY', 'persisted-key_1']],
globalConfigFile.filePath
);
const fileContent = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent).toContain('OCO_OPENAI_API_KEY=persisted-key_1');
Object.entries(DEFAULT_CONFIG).forEach(([key, value]) => {
expect(fileContent).toContain(`${key}=${value}`);
});
});
it('should set new config values', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
await setConfig(
[
['OCO_OPENAI_API_KEY', 'new-key'],
['OCO_MODEL', 'gpt-4']
],
globalConfigFile.filePath
);
const config = getConfig({ globalPath: globalConfigFile.filePath });
expect(config.OCO_OPENAI_API_KEY).toEqual('new-key');
expect(config.OCO_MODEL).toEqual('gpt-4');
});
it('should update existing config values', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_OPENAI_API_KEY: 'initial-key'
});
await setConfig(
[['OCO_OPENAI_API_KEY', 'updated-key']],
globalConfigFile.filePath
);
const config = getConfig({ globalPath: globalConfigFile.filePath });
expect(config.OCO_OPENAI_API_KEY).toEqual('updated-key');
});
it('should handle boolean and numeric values correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
await setConfig(
[
['OCO_TOKENS_MAX_INPUT', '8192'],
['OCO_DESCRIPTION', 'true'],
['OCO_ONE_LINE_COMMIT', 'false']
],
globalConfigFile.filePath
);
const config = getConfig({ globalPath: globalConfigFile.filePath });
expect(config.OCO_TOKENS_MAX_INPUT).toEqual(8192);
expect(config.OCO_DESCRIPTION).toEqual(true);
expect(config.OCO_ONE_LINE_COMMIT).toEqual(false);
});
it('should throw an error for unsupported config keys', async () => {
globalConfigFile = await generateConfig('.opencommit', {});
try {
await setConfig(
[['UNSUPPORTED_KEY', 'value']],
globalConfigFile.filePath
);
throw new Error('NEVER_REACHED');
} catch (error) {
expect(error.message).toContain(
'Unsupported config key: UNSUPPORTED_KEY'
);
expect(error.message).not.toContain('NEVER_REACHED');
}
});
it('should persist changes to the config file', async () => {
const isGlobalConfigFileExist = existsSync(globalConfigFile.filePath);
expect(isGlobalConfigFileExist).toBe(false);
await setConfig(
[['OCO_OPENAI_API_KEY', 'persisted-key']],
globalConfigFile.filePath
);
const fileContent = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent).toContain('OCO_OPENAI_API_KEY=persisted-key');
});
it('should set multiple configs in a row and keep the changes', async () => {
const isGlobalConfigFileExist = existsSync(globalConfigFile.filePath);
expect(isGlobalConfigFileExist).toBe(false);
await setConfig(
[['OCO_OPENAI_API_KEY', 'persisted-key']],
globalConfigFile.filePath
);
const fileContent1 = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent1).toContain('OCO_OPENAI_API_KEY=persisted-key');
await setConfig([['OCO_MODEL', 'gpt-4']], globalConfigFile.filePath);
const fileContent2 = readFileSync(globalConfigFile.filePath, 'utf8');
expect(fileContent2).toContain('OCO_MODEL=gpt-4');
});
});
});

test/unit/gemini.test.ts Normal file

@@ -0,0 +1,98 @@
import { Gemini } from '../../src/engine/gemini';
import { GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai';
import {
ConfigType,
getConfig,
OCO_AI_PROVIDER_ENUM
} from '../../src/commands/config';
import { OpenAI } from 'openai';
describe('Gemini', () => {
let gemini: Gemini;
let mockConfig: ConfigType;
let mockGoogleGenerativeAi: GoogleGenerativeAI;
let mockGenerativeModel: GenerativeModel;
let mockExit: jest.SpyInstance<never, [code?: number | undefined], any>;
const noop: (...args: any[]) => any = (...args: any[]) => {};
const mockGemini = () => {
mockConfig = getConfig() as ConfigType;
gemini = new Gemini({
apiKey: mockConfig.OCO_GEMINI_API_KEY,
model: mockConfig.OCO_MODEL
});
};
const oldEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...oldEnv };
jest.mock('@google/generative-ai');
jest.mock('../../src/commands/config');
jest.mock('@clack/prompts', () => ({
intro: jest.fn(),
outro: jest.fn()
}));
mockExit = jest.spyOn(process, 'exit').mockImplementation();
mockConfig = getConfig() as ConfigType;
mockConfig.OCO_AI_PROVIDER = OCO_AI_PROVIDER_ENUM.GEMINI;
mockConfig.OCO_GEMINI_API_KEY = 'mock-api-key';
mockConfig.OCO_MODEL = 'gemini-1.5-flash';
mockGoogleGenerativeAi = new GoogleGenerativeAI(
mockConfig.OCO_GEMINI_API_KEY
);
mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({
model: mockConfig.OCO_MODEL
});
});
afterEach(() => {
gemini = undefined as any;
});
afterAll(() => {
mockExit.mockRestore();
process.env = oldEnv;
});
it.skip('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
process.env.OCO_GEMINI_API_KEY = undefined;
process.env.OCO_AI_PROVIDER = 'gemini';
mockGemini();
expect(mockExit).toHaveBeenCalledWith(1);
});
it('should generate commit message', async () => {
const mockGenerateContent = jest
.fn()
.mockResolvedValue({ response: { text: () => 'generated content' } });
mockGenerativeModel.generateContent = mockGenerateContent;
mockGemini();
const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> =
[
{ role: 'system', content: 'system message' },
{ role: 'assistant', content: 'assistant message' }
];
jest
.spyOn(gemini, 'generateCommitMessage')
.mockImplementation(async () => 'generated content');
const result = await gemini.generateCommitMessage(messages);
expect(result).toEqual('generated content');
});
});


@@ -1,7 +1,7 @@
import path from 'path';
import { mkdtemp, rm, writeFile } from 'fs';
import { promisify } from 'util';
import { existsSync, mkdtemp, rm, writeFile } from 'fs';
import { tmpdir } from 'os';
import path from 'path';
import { promisify } from 'util';
const fsMakeTempDir = promisify(mkdtemp);
const fsRemove = promisify(rm);
const fsWriteFile = promisify(writeFile);
@@ -20,8 +20,11 @@ export async function prepareFile(
const filePath = path.resolve(tempDir, fileName);
await fsWriteFile(filePath, content);
const cleanup = async () => {
return fsRemove(tempDir, { recursive: true });
if (existsSync(tempDir)) {
await fsRemove(tempDir, { recursive: true });
}
};
return {
filePath,
cleanup


@@ -1,12 +1,12 @@
{
"compilerOptions": {
"target": "ESNext",
"lib": ["ES5", "ES6"],
"target": "ES2020",
"lib": ["ES6", "ES2020"],
"module": "CommonJS",
"module": "ESNext",
// "rootDir": "./src",
"resolveJsonModule": true,
"moduleResolution": "node",
"moduleResolution": "Node",
"allowJs": true,
@@ -21,9 +21,7 @@
"skipLibCheck": true
},
"include": [
"test/jest-setup.ts"
],
"include": ["test/jest-setup.ts"],
"exclude": ["node_modules"],
"ts-node": {
"esm": true,