Compare commits

...

24 Commits

Author SHA1 Message Date
di-sukharev
676c2b20c1 update package-lock.json 2024-08-27 16:58:22 +03:00
di-sukharev
21da102f6d Merge remote-tracking branch 'origin/dev' into refactoring_v1 2024-08-27 16:57:54 +03:00
di-sukharev
6ae7ce2720 chore(config.ts): remove debug console log to clean up the code and improve readability 2024-08-27 16:49:17 +03:00
di-sukharev
e44610fea3 chore(.gitignore): add notes.md to .gitignore to prevent tracking of notes files 2024-08-27 16:49:03 +03:00
di-sukharev
9dcb264420 test(config.test.ts): refactor generateConfig function to accept an object for content to improve readability and maintainability 2024-08-20 21:36:00 +03:00
di-sukharev
dd7fdba94e fix(config.ts): revert OCO_GITPUSH to its original position in the config object for clarity
refactor(config.ts): rename configFromEnv to envConfig for better readability
refactor(gemini.ts): simplify client initialization in the Gemini constructor
test(config.test.ts): add test case to check overriding global config with null values in local .env
test(gemini.test.ts): update AI provider assignment to use OCO_AI_PROVIDER_ENUM for consistency
2024-08-20 21:32:16 +03:00
di-sukharev
5fa12e2d4a feat(config): export OCO_AI_PROVIDER_ENUM to allow external access to AI provider constants
refactor(config): simplify mergeObjects function to improve readability and maintainability
refactor(setConfig): remove unnecessary keysToSet variable to streamline logging
refactor(engine): update switch cases to use OCO_AI_PROVIDER_ENUM for better consistency and clarity
2024-08-20 15:37:41 +03:00
di-sukharev
42a36492ad build 2024-08-20 15:37:33 +03:00
di-sukharev
443d27fc8d chore(docs): mark "Push to git" section in README as deprecated to inform users of upcoming changes
refactor(commit.ts): remove early return for non-pushing users to streamline commit process
refactor(config.ts): add deprecation comments for OCO_GITPUSH to indicate future removal
test(config.test.ts): enhance tests to ensure correct handling of local and global config priorities
test(gemini.test.ts): improve tests for Gemini class to ensure proper functionality and error handling
2024-08-20 15:34:09 +03:00
di-sukharev
04991dd00f fix(engine.ts): include DEFAULT_CONFIG in Gemini and Azure engine instantiation to ensure consistent configuration across engines 2024-08-20 12:58:00 +03:00
di-sukharev
3ded6062c1 fix: remove optional chaining from config access to ensure compatibility and prevent potential runtime errors
refactor(flowise.ts, ollama.ts): update axios client configuration to use a consistent URL format for API requests
fix: update README example to reflect the removal of optional chaining in config access
2024-08-20 12:32:40 +03:00
di-sukharev
f8584e7b78 refactor(engine): rename basePath to baseURL for consistency across interfaces and implementations
fix(engine): update Azure and Flowise engines to use baseURL instead of basePath for API configuration
fix(engine): adjust Ollama engine to handle baseURL and fallback to default URL
style(engine): clean up constructor formatting in OpenAiEngine for better readability
chore(engine): update getEngine function to use baseURL in configuration for all engines
2024-08-20 12:21:13 +03:00
di-sukharev
94faceefd3 remove mb confusing line 2024-08-20 12:06:01 +03:00
di-sukharev
720cd6f9c1 clear readme 2024-08-20 12:05:15 +03:00
di-sukharev
b6a92d557f docs(README.md): update author section and clarify API key storage details
docs(README.md): improve instructions for using OpenCommit CLI and configuration
fix(README.md): correct default model name to gpt-4o-mini in usage examples
fix(package.json): update openai package version to 4.56.0 for compatibility
2024-08-20 12:04:07 +03:00
di-sukharev
71354e4687 feat: add CommandsEnum to define command constants for better maintainability
refactor(generateCommitMessageFromGitDiff): update types for OpenAI messages to improve type safety
fix(commitlint/config): remove optional chaining for OCO_LANGUAGE to ensure proper access
refactor(commitlint/prompts): update types for OpenAI messages to improve type safety
refactor(prompts): update types for OpenAI messages to improve type safety
2024-08-20 12:03:40 +03:00
di-sukharev
8f85ee8f8e refactor(testAi.ts): update import statements to use OpenAI type for better clarity and maintainability
fix(testAi.ts): change parameter type in generateCommitMessage method to align with OpenAI's updated type definitions
2024-08-20 12:01:51 +03:00
di-sukharev
f9103a3c6a build 2024-08-20 12:01:38 +03:00
di-sukharev
4afd7de7a8 feat(commands): add COMMANDS enum to standardize command names across the application
refactor(commit.ts): restructure generateCommitMessageFromGitDiff function to use an interface for parameters and improve readability
fix(config.ts): update DEFAULT_TOKEN_LIMITS to correct values for max tokens input and output
chore(config.ts): enhance config validation to handle undefined and null values more effectively
style(commit.ts): improve formatting and consistency in the commit confirmation logic
style(config.ts): clean up error messages and improve clarity in config setting process
2024-08-20 12:01:14 +03:00
di-sukharev
5cfa3cded2 feat(engine): refactor AI engine interfaces and implementations to support multiple AI providers and improve configurability
- Introduce `AiEngineConfig` interface for consistent configuration across AI engines.
- Update `generateCommitMessage` method signatures to use `OpenAIClient.Chat.Completions.ChatCompletionMessageParam`.
- Implement specific configurations for each AI provider (Anthropic, Azure, Gemini, Ollama, OpenAI) to enhance flexibility.
- Replace hardcoded values with configurable parameters for model, API key, and token limits.
- Refactor client initialization to use Axios instances for better HTTP request handling.
- Remove deprecated code and improve error handling for better user feedback.
2024-08-20 11:58:19 +03:00
di-sukharev
bb0b0e804e build 2024-08-20 11:56:44 +03:00
di-sukharev
5d87cc514b feat(ENUMS.ts): add ENUMS file to centralize command constants
refactor(commitlint.ts): update import path to use ENUMS for command constants
refactor(config.ts): update import path to use ENUMS for command constants
refactor(githook.ts): update import path to use ENUMS for command constants
fix(prompts.ts): correct conventional commit keywords instruction text
2024-08-19 14:09:27 +03:00
di-sukharev
6f4e8fde93 docs(README.md): update usage examples to remove redundant 'opencommit' command
chore(example.txt): remove unused example.txt file
fix(config.ts): correct import order and improve validation messages
fix(githook.ts): improve error message for unsupported mode
fix(azure.ts): add non-null assertion for message content
fix(gemini.ts): use strict equality for role comparison
refactor(generateCommitMessageFromGitDiff.ts): reorder imports for consistency
refactor(github-action.ts): reorder imports for consistency
refactor(prompts.ts): simplify prompt content generation and improve readability
style(engine.ts): fix inconsistent spacing and import order
2024-08-19 14:00:08 +03:00
di-sukharev
745bb5218f update imports 2024-08-19 13:09:46 +03:00
30 changed files with 41777 additions and 38400 deletions

1
.gitignore vendored
View File

@@ -11,3 +11,4 @@ uncaughtExceptions.log
src/*.json src/*.json
.idea .idea
test.ts test.ts
notes.md

View File

@@ -2,7 +2,7 @@
<div> <div>
<img src=".github/logo-grad.svg" alt="OpenCommit logo"/> <img src=".github/logo-grad.svg" alt="OpenCommit logo"/>
<h1 align="center">OpenCommit</h1> <h1 align="center">OpenCommit</h1>
<h4 align="center">Follow the bird <a href="https://twitter.com/_sukharev_"><img src="https://img.shields.io/twitter/follow/_sukharev_?style=flat&label=_sukharev_&logo=twitter&color=0bf&logoColor=fff" align="center"></a> <h4 align="center">Author <a href="https://twitter.com/_sukharev_"><img src="https://img.shields.io/twitter/follow/_sukharev_?style=flat&label=_sukharev_&logo=twitter&color=0bf&logoColor=fff" align="center"></a>
</div> </div>
<h2>Auto-generate meaningful commits in a second</h2> <h2>Auto-generate meaningful commits in a second</h2>
<p>Killing lame commits with AI 🤯🔫</p> <p>Killing lame commits with AI 🤯🔫</p>
@@ -16,7 +16,7 @@
<img src=".github/opencommit-example.png" alt="OpenCommit example"/> <img src=".github/opencommit-example.png" alt="OpenCommit example"/>
</div> </div>
All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable. All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable, basically everything is.
## Setup OpenCommit as a CLI tool ## Setup OpenCommit as a CLI tool
@@ -58,6 +58,8 @@ git add <files...>
oco oco
``` ```
Running `git add` is optional, `oco` will do it for you.
### Running locally with Ollama ### Running locally with Ollama
You can also run it with local model through ollama: You can also run it with local model through ollama:
@@ -68,20 +70,21 @@ You can also run it with local model through ollama:
```sh ```sh
git add <files...> git add <files...>
OCO_AI_PROVIDER='ollama' opencommit oco config set OCO_AI_PROVIDER='ollama'
``` ```
If you want to use a model other than mistral, you can do so by setting the `OCO_AI_PROVIDER` environment variable as follows: If you want to use a model other than mistral (default), you can do so by setting the `OCO_AI_PROVIDER` environment variable as follows:
```sh ```sh
OCO_AI_PROVIDER='ollama/llama3:8b' opencommit oco config set OCO_AI_PROVIDER='ollama/llama3:8b'
``` ```
if you have ollama that is set up in docker/ on another machine with GPUs (not locally), you can change the default endpoint url. If you have ollama that is set up in docker/ on another machine with GPUs (not locally), you can change the default endpoint url.
You can do so by setting the `OCO_OLLAMA_API_URL` environment variable as follows: You can do so by setting the `OCO_OLLAMA_API_URL` environment variable as follows:
```sh ```sh
OCO_OLLAMA_API_URL='http://192.168.1.10:11434/api/chat' opencommit oco config set OCO_OLLAMA_API_URL='http://192.168.1.10:11434/api/chat'
``` ```
where 192.168.1.10 is example of endpoint URL, where you have ollama set up. where 192.168.1.10 is example of endpoint URL, where you have ollama set up.
@@ -95,6 +98,7 @@ There are multiple optional flags that can be used with the `oco` command:
Link to the GitMoji specification: https://gitmoji.dev/ Link to the GitMoji specification: https://gitmoji.dev/
This flag can only be used if the `OCO_EMOJI` configuration item is set to `true`. This flag allows users to use all emojis in the GitMoji specification, By default, the GitMoji full specification is set to `false`, which only includes 10 emojis (🐛✨📝🚀✅♻️⬆️🔧🌐💡). This flag can only be used if the `OCO_EMOJI` configuration item is set to `true`. This flag allows users to use all emojis in the GitMoji specification, By default, the GitMoji full specification is set to `false`, which only includes 10 emojis (🐛✨📝🚀✅♻️⬆️🔧🌐💡).
This is due to limit the number of tokens sent in each request. However, if you would like to use the full GitMoji specification, you can use the `--fgm` flag. This is due to limit the number of tokens sent in each request. However, if you would like to use the full GitMoji specification, you can use the `--fgm` flag.
``` ```
@@ -116,20 +120,24 @@ oco --yes
Create a `.env` file and add OpenCommit config variables there like this: Create a `.env` file and add OpenCommit config variables there like this:
```env ```env
...
OCO_OPENAI_API_KEY=<your OpenAI API token> OCO_OPENAI_API_KEY=<your OpenAI API token>
OCO_TOKENS_MAX_INPUT=<max model token limit (default: 4096)> OCO_TOKENS_MAX_INPUT=<max model token limit (default: 4096)>
OCO_TOKENS_MAX_OUTPUT=<max response tokens (default: 500)> OCO_TOKENS_MAX_OUTPUT=<max response tokens (default: 500)>
OCO_OPENAI_BASE_PATH=<may be used to set proxy path to OpenAI api> OCO_OPENAI_BASE_PATH=<may be used to set proxy path to OpenAI api>
OCO_DESCRIPTION=<postface a message with ~3 sentences description of the changes> OCO_DESCRIPTION=<postface a message with ~3 sentences description of the changes>
OCO_EMOJI=<boolean, add GitMoji> OCO_EMOJI=<boolean, add GitMoji>
OCO_MODEL=<either 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'> OCO_MODEL=<either 'gpt-4o', 'gpt-4', 'gpt-4-turbo', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview' or any string basically, but it should be a valid model name>
OCO_LANGUAGE=<locale, scroll to the bottom to see options> OCO_LANGUAGE=<locale, scroll to the bottom to see options>
OCO_MESSAGE_TEMPLATE_PLACEHOLDER=<message template placeholder, default: '$msg'> OCO_MESSAGE_TEMPLATE_PLACEHOLDER=<message template placeholder, default: '$msg'>
OCO_PROMPT_MODULE=<either conventional-commit or @commitlint, default: conventional-commit> OCO_PROMPT_MODULE=<either conventional-commit or @commitlint, default: conventional-commit>
OCO_ONE_LINE_COMMIT=<one line commit message, default: false> OCO_ONE_LINE_COMMIT=<one line commit message, default: false>
OCO_AI_PROVIDER=<anthropic, azure, ollama or ollama/model default ollama model: mistral> OCO_AI_PROVIDER=<openai (default), anthropic, azure, ollama or ollama/model>
...
``` ```
This are not all the config options, but you get the point.
### Global config for all repos ### Global config for all repos
Local config still has more priority than Global config, but you may set `OCO_MODEL` and `OCO_LOCALE` globally and set local configs for `OCO_EMOJI` and `OCO_DESCRIPTION` per repo which is more convenient. Local config still has more priority than Global config, but you may set `OCO_MODEL` and `OCO_LOCALE` globally and set local configs for `OCO_EMOJI` and `OCO_DESCRIPTION` per repo which is more convenient.
@@ -137,7 +145,7 @@ Local config still has more priority than Global config, but you may set `OCO_MO
Simply set any of the variables above like this: Simply set any of the variables above like this:
```sh ```sh
oco config set OCO_MODEL=gpt-4o oco config set OCO_MODEL=gpt-4o-mini
``` ```
Configure [GitMoji](https://gitmoji.dev/) to preface a message. Configure [GitMoji](https://gitmoji.dev/) to preface a message.
@@ -152,20 +160,22 @@ To remove preface emojis:
oco config set OCO_EMOJI=false oco config set OCO_EMOJI=false
``` ```
Other config options are behaving the same.
### Switch to GPT-4 or other models ### Switch to GPT-4 or other models
By default, OpenCommit uses `gpt-4o` model. By default, OpenCommit uses `gpt-4o-mini` model.
You may switch to GPT-4 which performs better, but costs ~x15 times more 🤠 You may switch to gpt-4o which performs better, but costs more 🤠
```sh ```sh
oco config set OCO_MODEL=gpt-4 oco config set OCO_MODEL=gpt-4o
``` ```
or for as a cheaper option: or for as a cheaper option:
```sh ```sh
oco config set OCO_MODEL=gpt-4o-mini oco config set OCO_MODEL=gpt-3.5-turbo
``` ```
### Switch to Azure OpenAI ### Switch to Azure OpenAI
@@ -178,7 +188,7 @@ You could switch to [Azure OpenAI Service](https://learn.microsoft.com/azure/cog
opencommit config set OCO_AI_PROVIDER=azure opencommit config set OCO_AI_PROVIDER=azure
``` ```
Of course need to set 'OPENAI_API_KEY'. And also need to set the Of course need to set 'OCO_OPENAI_API_KEY'. And also need to set the
'OPENAI_BASE_PATH' for the endpoint and set the deployment name to 'OPENAI_BASE_PATH' for the endpoint and set the deployment name to
'model'. 'model'.
@@ -201,9 +211,9 @@ oco config set OCO_LANGUAGE=française
The default language setting is **English** The default language setting is **English**
All available languages are currently listed in the [i18n](https://github.com/di-sukharev/opencommit/tree/master/src/i18n) folder All available languages are currently listed in the [i18n](https://github.com/di-sukharev/opencommit/tree/master/src/i18n) folder
### Push to git ### Push to git (gonna be deprecated)
Pushing to git is on by default but if you would like to turn it off just use: A prompt to ushing to git is on by default but if you would like to turn it off just use:
```sh ```sh
oco config set OCO_GITPUSH=false oco config set OCO_GITPUSH=false
@@ -291,7 +301,7 @@ In our codebase, the implementation of this feature can be found in the followin
```javascript ```javascript
commitMessage = messageTemplate.replace( commitMessage = messageTemplate.replace(
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER, config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
commitMessage commitMessage
); );
``` ```
@@ -348,7 +358,7 @@ Or follow the process of your IDE Source Control feature, when it calls `git com
OpenCommit is now available as a GitHub Action which automatically improves all new commits messages when you push to remote! OpenCommit is now available as a GitHub Action which automatically improves all new commits messages when you push to remote!
This is great if you want to make sure all of the commits in all of your repository branches are meaningful and not lame like `fix1` or `done2`. This is great if you want to make sure all commits in all of your repository branches are meaningful and not lame like `fix1` or `done2`.
Create a file `.github/workflows/opencommit.yml` with the contents below: Create a file `.github/workflows/opencommit.yml` with the contents below:

View File

42215
out/cli.cjs

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

109
package-lock.json generated
View File

@@ -28,7 +28,7 @@
"ignore": "^5.2.4", "ignore": "^5.2.4",
"ini": "^3.0.1", "ini": "^3.0.1",
"inquirer": "^9.1.4", "inquirer": "^9.1.4",
"openai": "^3.2.1" "openai": "^4.56.0"
}, },
"bin": { "bin": {
"oco": "out/cli.cjs", "oco": "out/cli.cjs",
@@ -2657,9 +2657,9 @@
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
}, },
"node_modules/axios": { "node_modules/axios": {
"version": "1.6.8", "version": "1.7.4",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.8.tgz", "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.4.tgz",
"integrity": "sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==", "integrity": "sha512-DukmaFRnY6AzAALSH4J2M3k6PkaC+MfaAGdEERRWcC9q3/TWQwLpHR8ZRLKTdQ3aBDL64EdluRDjJqKw+BPZEw==",
"dependencies": { "dependencies": {
"follow-redirects": "^1.15.6", "follow-redirects": "^1.15.6",
"form-data": "^4.0.0", "form-data": "^4.0.0",
@@ -2891,12 +2891,12 @@
} }
}, },
"node_modules/braces": { "node_modules/braces": {
"version": "3.0.2", "version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"fill-range": "^7.0.1" "fill-range": "^7.1.1"
}, },
"engines": { "engines": {
"node": ">=8" "node": ">=8"
@@ -4546,9 +4546,9 @@
} }
}, },
"node_modules/fill-range": { "node_modules/fill-range": {
"version": "7.0.1", "version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"to-regex-range": "^5.0.1" "to-regex-range": "^5.0.1"
@@ -7154,6 +7154,25 @@
} }
} }
}, },
"node_modules/node-fetch/node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/node-fetch/node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
},
"node_modules/node-fetch/node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/node-int64": { "node_modules/node-int64": {
"version": "0.4.0", "version": "0.4.0",
"resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
@@ -7223,20 +7242,36 @@
} }
}, },
"node_modules/openai": { "node_modules/openai": {
"version": "3.3.0", "version": "4.56.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-3.3.0.tgz", "resolved": "https://registry.npmjs.org/openai/-/openai-4.56.0.tgz",
"integrity": "sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==", "integrity": "sha512-zcag97+3bG890MNNa0DQD9dGmmTWL8unJdNkulZzWRXrl+QeD+YkBI4H58rJcwErxqGK6a0jVPZ4ReJjhDGcmw==",
"dependencies": { "dependencies": {
"axios": "^0.26.0", "@types/node": "^18.11.18",
"form-data": "^4.0.0" "@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
},
"bin": {
"openai": "bin/cli"
},
"peerDependencies": {
"zod": "^3.23.8"
},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
} }
}, },
"node_modules/openai/node_modules/axios": { "node_modules/openai/node_modules/@types/node": {
"version": "0.26.1", "version": "18.19.45",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.26.1.tgz", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.45.tgz",
"integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==", "integrity": "sha512-VZxPKNNhjKmaC1SUYowuXSRSMGyQGmQjvvA1xE4QZ0xce2kLtEhPDS+kqpCPBZYgqblCLQ2DAjSzmgCM5auvhA==",
"dependencies": { "dependencies": {
"follow-redirects": "^1.14.8" "undici-types": "~5.26.4"
} }
}, },
"node_modules/optionator": { "node_modules/optionator": {
@@ -7648,6 +7683,7 @@
"version": "2.3.1", "version": "2.3.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
"integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
"dev": true,
"engines": { "engines": {
"node": ">=6" "node": ">=6"
} }
@@ -8408,17 +8444,6 @@
"node": ">=8.0" "node": ">=8.0"
} }
}, },
"node_modules/tr46": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-4.1.1.tgz",
"integrity": "sha512-2lv/66T7e5yNyhAAC4NaKe5nVavzuGJQVVtRYLyQ2OI8tsJ61PMLlelehb0wi2Hx6+hT/OJUWZcw8MjlSRnxvw==",
"dependencies": {
"punycode": "^2.3.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/tree-kill": { "node_modules/tree-kill": {
"version": "1.2.2", "version": "1.2.2",
"resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz",
@@ -8749,26 +8774,6 @@
"node": ">= 8" "node": ">= 8"
} }
}, },
"node_modules/webidl-conversions": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz",
"integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==",
"engines": {
"node": ">=12"
}
},
"node_modules/whatwg-url": {
"version": "13.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-13.0.0.tgz",
"integrity": "sha512-9WWbymnqj57+XEuqADHrCJ2eSXzn8WXIW/YSGaZtb2WKAInQ6CHfaUUcTyyver0p8BDg5StLQq8h1vtZuwmOig==",
"dependencies": {
"tr46": "^4.1.1",
"webidl-conversions": "^7.0.0"
},
"engines": {
"node": ">=16"
}
},
"node_modules/which": { "node_modules/which": {
"version": "2.0.2", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",

View File

@@ -81,8 +81,8 @@
"@actions/core": "^1.10.0", "@actions/core": "^1.10.0",
"@actions/exec": "^1.1.1", "@actions/exec": "^1.1.1",
"@actions/github": "^5.1.1", "@actions/github": "^5.1.1",
"@azure/openai": "^1.0.0-beta.12",
"@anthropic-ai/sdk": "^0.19.2", "@anthropic-ai/sdk": "^0.19.2",
"@azure/openai": "^1.0.0-beta.12",
"@clack/prompts": "^0.6.1", "@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2", "@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4", "@google/generative-ai": "^0.11.4",
@@ -97,9 +97,6 @@
"ignore": "^5.2.4", "ignore": "^5.2.4",
"ini": "^3.0.1", "ini": "^3.0.1",
"inquirer": "^9.1.4", "inquirer": "^9.1.4",
"openai": "^3.2.1" "openai": "^4.56.0"
},
"overrides": {
"whatwg-url": "13.0.0"
} }
} }

5
src/commands/ENUMS.ts Normal file
View File

@@ -0,0 +1,5 @@
export enum COMMANDS {
config = 'config',
hook = 'hook',
commitlint = 'commitlint'
}

View File

@@ -1,6 +1,3 @@
import chalk from 'chalk';
import { execa } from 'execa';
import { import {
confirm, confirm,
intro, intro,
@@ -10,7 +7,8 @@ import {
select, select,
spinner spinner
} from '@clack/prompts'; } from '@clack/prompts';
import chalk from 'chalk';
import { execa } from 'execa';
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff'; import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
import { import {
assertGitRepo, assertGitRepo,
@@ -32,18 +30,25 @@ const getGitRemotes = async () => {
// Check for the presence of message templates // Check for the presence of message templates
const checkMessageTemplate = (extraArgs: string[]): string | false => { const checkMessageTemplate = (extraArgs: string[]): string | false => {
for (const key in extraArgs) { for (const key in extraArgs) {
if (extraArgs[key].includes(config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER)) if (extraArgs[key].includes(config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER))
return extraArgs[key]; return extraArgs[key];
} }
return false; return false;
}; };
const generateCommitMessageFromGitDiff = async ( interface GenerateCommitMessageFromGitDiffParams {
diff: string, diff: string;
extraArgs: string[], extraArgs: string[];
fullGitMojiSpec: boolean, fullGitMojiSpec?: boolean;
skipCommitConfirmation: boolean skipCommitConfirmation?: boolean;
): Promise<void> => { }
const generateCommitMessageFromGitDiff = async ({
diff,
extraArgs,
fullGitMojiSpec = false,
skipCommitConfirmation = false
}: GenerateCommitMessageFromGitDiffParams): Promise<void> => {
await assertGitRepo(); await assertGitRepo();
const commitSpinner = spinner(); const commitSpinner = spinner();
commitSpinner.start('Generating the commit message'); commitSpinner.start('Generating the commit message');
@@ -56,14 +61,14 @@ const generateCommitMessageFromGitDiff = async (
const messageTemplate = checkMessageTemplate(extraArgs); const messageTemplate = checkMessageTemplate(extraArgs);
if ( if (
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER && config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER &&
typeof messageTemplate === 'string' typeof messageTemplate === 'string'
) { ) {
const messageTemplateIndex = extraArgs.indexOf(messageTemplate); const messageTemplateIndex = extraArgs.indexOf(messageTemplate);
extraArgs.splice(messageTemplateIndex, 1); extraArgs.splice(messageTemplateIndex, 1);
commitMessage = messageTemplate.replace( commitMessage = messageTemplate.replace(
config?.OCO_MESSAGE_TEMPLATE_PLACEHOLDER, config.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
commitMessage commitMessage
); );
} }
@@ -77,9 +82,11 @@ ${commitMessage}
${chalk.grey('——————————————————')}` ${chalk.grey('——————————————————')}`
); );
const isCommitConfirmedByUser = skipCommitConfirmation || await confirm({ const isCommitConfirmedByUser =
message: 'Confirm the commit message?' skipCommitConfirmation ||
}); (await confirm({
message: 'Confirm the commit message?'
}));
if (isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) { if (isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) {
const { stdout } = await execa('git', [ const { stdout } = await execa('git', [
@@ -95,17 +102,13 @@ ${chalk.grey('——————————————————')}`
const remotes = await getGitRemotes(); const remotes = await getGitRemotes();
// user isn't pushing, return early
if (config?.OCO_GITPUSH === false)
return
if (!remotes.length) { if (!remotes.length) {
const { stdout } = await execa('git', ['push']); const { stdout } = await execa('git', ['push']);
if (stdout) outro(stdout); if (stdout) outro(stdout);
process.exit(0); process.exit(0);
} }
if (remotes.length === 1 && config?.OCO_GITPUSH !== true) { if (remotes.length === 1 && config.OCO_GITPUSH !== true) {
const isPushConfirmedByUser = await confirm({ const isPushConfirmedByUser = await confirm({
message: 'Do you want to run `git push`?' message: 'Do you want to run `git push`?'
}); });
@@ -157,14 +160,14 @@ ${chalk.grey('——————————————————')}`
} }
if (!isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) { if (!isCommitConfirmedByUser && !isCancel(isCommitConfirmedByUser)) {
const regenerateMessage = await confirm({ const regenerateMessage = await confirm({
message: 'Do you want to regenerate the message ?' message: 'Do you want to regenerate the message?'
}); });
if (regenerateMessage && !isCancel(isCommitConfirmedByUser)) { if (regenerateMessage && !isCancel(isCommitConfirmedByUser)) {
await generateCommitMessageFromGitDiff( await generateCommitMessageFromGitDiff({
diff, diff,
extraArgs, extraArgs,
fullGitMojiSpec fullGitMojiSpec
) });
} }
} }
} catch (error) { } catch (error) {
@@ -249,12 +252,12 @@ export async function commit(
); );
const [, generateCommitError] = await trytm( const [, generateCommitError] = await trytm(
generateCommitMessageFromGitDiff( generateCommitMessageFromGitDiff({
await getDiff({ files: stagedFiles }), diff: await getDiff({ files: stagedFiles }),
extraArgs, extraArgs,
fullGitMojiSpec, fullGitMojiSpec,
skipCommitConfirmation skipCommitConfirmation
) })
); );
if (generateCommitError) { if (generateCommitError) {

View File

@@ -1,11 +1,9 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk'; import chalk from 'chalk';
import { command } from 'cleye'; import { command } from 'cleye';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum';
import { configureCommitlintIntegration } from '../modules/commitlint/config'; import { configureCommitlintIntegration } from '../modules/commitlint/config';
import { getCommitlintLLMConfig } from '../modules/commitlint/utils'; import { getCommitlintLLMConfig } from '../modules/commitlint/utils';
import { COMMANDS } from './ENUMS';
export enum CONFIG_MODES { export enum CONFIG_MODES {
get = 'get', get = 'get',

View File

@@ -1,3 +1,4 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk'; import chalk from 'chalk';
import { command } from 'cleye'; import { command } from 'cleye';
import * as dotenv from 'dotenv'; import * as dotenv from 'dotenv';
@@ -5,12 +6,9 @@ import { existsSync, readFileSync, writeFileSync } from 'fs';
import { parse as iniParse, stringify as iniStringify } from 'ini'; import { parse as iniParse, stringify as iniStringify } from 'ini';
import { homedir } from 'os'; import { homedir } from 'os';
import { join as pathJoin, resolve as pathResolve } from 'path'; import { join as pathJoin, resolve as pathResolve } from 'path';
import { COMMANDS } from './ENUMS';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum';
import { getI18nLocal } from '../i18n';
import { TEST_MOCK_TYPES } from '../engine/testAi'; import { TEST_MOCK_TYPES } from '../engine/testAi';
import { getI18nLocal, i18n } from '../i18n';
export enum CONFIG_KEYS { export enum CONFIG_KEYS {
OCO_OPENAI_API_KEY = 'OCO_OPENAI_API_KEY', OCO_OPENAI_API_KEY = 'OCO_OPENAI_API_KEY',
@@ -28,7 +26,7 @@ export enum CONFIG_KEYS {
OCO_MESSAGE_TEMPLATE_PLACEHOLDER = 'OCO_MESSAGE_TEMPLATE_PLACEHOLDER', OCO_MESSAGE_TEMPLATE_PLACEHOLDER = 'OCO_MESSAGE_TEMPLATE_PLACEHOLDER',
OCO_PROMPT_MODULE = 'OCO_PROMPT_MODULE', OCO_PROMPT_MODULE = 'OCO_PROMPT_MODULE',
OCO_AI_PROVIDER = 'OCO_AI_PROVIDER', OCO_AI_PROVIDER = 'OCO_AI_PROVIDER',
OCO_GITPUSH = 'OCO_GITPUSH', OCO_GITPUSH = 'OCO_GITPUSH', // todo: deprecate
OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT', OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT',
OCO_AZURE_ENDPOINT = 'OCO_AZURE_ENDPOINT', OCO_AZURE_ENDPOINT = 'OCO_AZURE_ENDPOINT',
OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE', OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE',
@@ -103,8 +101,8 @@ const getDefaultModel = (provider: string | undefined): string => {
}; };
export enum DEFAULT_TOKEN_LIMITS { export enum DEFAULT_TOKEN_LIMITS {
DEFAULT_MAX_TOKENS_INPUT = 4096, DEFAULT_MAX_TOKENS_INPUT = 40960,
DEFAULT_MAX_TOKENS_OUTPUT = 500 DEFAULT_MAX_TOKENS_OUTPUT = 4096
} }
const validateConfig = ( const validateConfig = (
@@ -113,8 +111,10 @@ const validateConfig = (
validationMessage: string validationMessage: string
) => { ) => {
if (!condition) { if (!condition) {
outro(`${chalk.red('✖')} wrong value for ${key}: ${validationMessage}.`);
outro( outro(
`${chalk.red('✖')} Unsupported config key ${key}: ${validationMessage}` 'For more help refer to docs https://github.com/di-sukharev/opencommit'
); );
process.exit(1); process.exit(1);
@@ -123,65 +123,54 @@ const validateConfig = (
export const configValidators = { export const configValidators = {
[CONFIG_KEYS.OCO_OPENAI_API_KEY](value: any, config: any = {}) { [CONFIG_KEYS.OCO_OPENAI_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER == 'gemini') return value; if (config.OCO_AI_PROVIDER !== 'openai') return value;
//need api key unless running locally with ollama
validateConfig( validateConfig(
'OpenAI API_KEY', 'OCO_OPENAI_API_KEY',
value || typeof value === 'string' && value.length > 0,
config.OCO_ANTHROPIC_API_KEY || 'Empty value is not allowed'
config.OCO_AI_PROVIDER.startsWith('ollama') ||
config.OCO_AZURE_API_KEY ||
config.OCO_AI_PROVIDER == 'test' ||
config.OCO_AI_PROVIDER == 'flowise',
'You need to provide an OpenAI/Anthropic/Azure or other provider API key via `oco config set OCO_OPENAI_API_KEY=your_key`, for help refer to docs https://github.com/di-sukharev/opencommit'
); );
validateConfig( validateConfig(
CONFIG_KEYS.OCO_OPENAI_API_KEY, 'OCO_OPENAI_API_KEY',
value.startsWith('sk-') || config.OCO_AI_PROVIDER != 'openai', value,
'Must start with "sk-" for openai provider' 'You need to provide the OCO_OPENAI_API_KEY when OCO_AI_PROVIDER is set to "openai" (default). Run `oco config set OCO_OPENAI_API_KEY=your_key`'
); );
return value; return value;
}, },
[CONFIG_KEYS.OCO_AZURE_API_KEY](value: any, config: any = {}) { [CONFIG_KEYS.OCO_AZURE_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER !== 'azure') return value;
validateConfig( validateConfig(
'ANTHROPIC_API_KEY', 'OCO_AZURE_API_KEY',
value || !!value,
config.OCO_OPENAI_API_KEY || 'You need to provide the OCO_AZURE_API_KEY when OCO_AI_PROVIDER is set to "azure". Run: `oco config set OCO_AZURE_API_KEY=your_key`'
config.OCO_AZURE_API_KEY ||
config.OCO_AI_PROVIDER == 'ollama' ||
config.OCO_AI_PROVIDER == 'test' ||
config.OCO_AI_PROVIDER == 'flowise',
'You need to provide an OpenAI/Anthropic/Azure API key'
); );
return value; return value;
}, },
[CONFIG_KEYS.OCO_GEMINI_API_KEY](value: any, config: any = {}) { [CONFIG_KEYS.OCO_GEMINI_API_KEY](value: any, config: any = {}) {
// only need to check for gemini api key if using gemini if (config.OCO_AI_PROVIDER !== 'gemini') return value;
if (config.OCO_AI_PROVIDER != 'gemini') return value;
validateConfig( validateConfig(
'Gemini API Key', 'OCO_GEMINI_API_KEY',
value || config.OCO_GEMINI_API_KEY || config.OCO_AI_PROVIDER == 'test', value || config.OCO_GEMINI_API_KEY || config.OCO_AI_PROVIDER === 'test',
'You need to provide an Gemini API key' 'You need to provide the OCO_GEMINI_API_KEY when OCO_AI_PROVIDER is set to "gemini". Run: `oco config set OCO_GEMINI_API_KEY=your_key`'
); );
return value; return value;
}, },
[CONFIG_KEYS.OCO_ANTHROPIC_API_KEY](value: any, config: any = {}) { [CONFIG_KEYS.OCO_ANTHROPIC_API_KEY](value: any, config: any = {}) {
if (config.OCO_AI_PROVIDER !== 'anthropic') return value;
validateConfig( validateConfig(
'ANTHROPIC_API_KEY', 'ANTHROPIC_API_KEY',
value || !!value,
config.OCO_OPENAI_API_KEY || 'You need to provide the OCO_ANTHROPIC_API_KEY key when OCO_AI_PROVIDER is set to "anthropic". Run: `oco config set OCO_ANTHROPIC_API_KEY=your_key`'
config.OCO_AI_PROVIDER == 'ollama' ||
config.OCO_AI_PROVIDER == 'test' ||
config.OCO_AI_PROVIDER == 'flowise',
'You need to provide an OpenAI/Anthropic API key'
); );
return value; return value;
@@ -190,8 +179,8 @@ export const configValidators = {
[CONFIG_KEYS.OCO_FLOWISE_API_KEY](value: any, config: any = {}) { [CONFIG_KEYS.OCO_FLOWISE_API_KEY](value: any, config: any = {}) {
validateConfig( validateConfig(
CONFIG_KEYS.OCO_FLOWISE_API_KEY, CONFIG_KEYS.OCO_FLOWISE_API_KEY,
value || config.OCO_AI_PROVIDER != 'flowise', value || config.OCO_AI_PROVIDER !== 'flowise',
'You need to provide a flowise API key' 'You need to provide the OCO_FLOWISE_API_KEY when OCO_AI_PROVIDER is set to "flowise". Run: `oco config set OCO_FLOWISE_API_KEY=your_key`'
); );
return value; return value;
@@ -201,25 +190,17 @@ export const configValidators = {
validateConfig( validateConfig(
CONFIG_KEYS.OCO_DESCRIPTION, CONFIG_KEYS.OCO_DESCRIPTION,
typeof value === 'boolean', typeof value === 'boolean',
'Must be true or false' 'Must be boolean: true or false'
); );
return value; return value;
}, },
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT](value: any) { [CONFIG_KEYS.OCO_TOKENS_MAX_INPUT](value: any) {
// If the value is a string, convert it to a number. value = parseInt(value);
if (typeof value === 'string') {
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_INPUT,
!isNaN(value),
'Must be a number'
);
}
validateConfig( validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_INPUT, CONFIG_KEYS.OCO_TOKENS_MAX_INPUT,
value ? typeof value === 'number' : undefined, !isNaN(value),
'Must be a number' 'Must be a number'
); );
@@ -227,18 +208,10 @@ export const configValidators = {
}, },
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT](value: any) { [CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT](value: any) {
// If the value is a string, convert it to a number. value = parseInt(value);
if (typeof value === 'string') {
value = parseInt(value);
validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT,
!isNaN(value),
'Must be a number'
);
}
validateConfig( validateConfig(
CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT, CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT,
value ? typeof value === 'number' : undefined, !isNaN(value),
'Must be a number' 'Must be a number'
); );
@@ -249,18 +222,21 @@ export const configValidators = {
validateConfig( validateConfig(
CONFIG_KEYS.OCO_EMOJI, CONFIG_KEYS.OCO_EMOJI,
typeof value === 'boolean', typeof value === 'boolean',
'Must be true or false' 'Must be boolean: true or false'
); );
return value; return value;
}, },
[CONFIG_KEYS.OCO_LANGUAGE](value: any) { [CONFIG_KEYS.OCO_LANGUAGE](value: any) {
const supportedLanguages = Object.keys(i18n);
validateConfig( validateConfig(
CONFIG_KEYS.OCO_LANGUAGE, CONFIG_KEYS.OCO_LANGUAGE,
getI18nLocal(value), getI18nLocal(value),
`${value} is not supported yet` `${value} is not supported yet. Supported languages: ${supportedLanguages}`
); );
return getI18nLocal(value); return getI18nLocal(value);
}, },
@@ -304,6 +280,7 @@ export const configValidators = {
return value; return value;
}, },
// todo: deprecate
[CONFIG_KEYS.OCO_GITPUSH](value: any) { [CONFIG_KEYS.OCO_GITPUSH](value: any) {
validateConfig( validateConfig(
CONFIG_KEYS.OCO_GITPUSH, CONFIG_KEYS.OCO_GITPUSH,
@@ -314,19 +291,16 @@ export const configValidators = {
}, },
[CONFIG_KEYS.OCO_AI_PROVIDER](value: any) { [CONFIG_KEYS.OCO_AI_PROVIDER](value: any) {
if (!value) value = 'openai';
validateConfig( validateConfig(
CONFIG_KEYS.OCO_AI_PROVIDER, CONFIG_KEYS.OCO_AI_PROVIDER,
[ ['openai', 'anthropic', 'gemini', 'azure', 'test', 'flowise'].includes(
'', value
'openai', ) || value.startsWith('ollama'),
'anthropic',
'gemini',
'azure',
'test',
'flowise'
].includes(value) || value.startsWith('ollama'),
`${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)` `${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
); );
return value; return value;
}, },
@@ -354,7 +328,7 @@ export const configValidators = {
validateConfig( validateConfig(
CONFIG_KEYS.OCO_FLOWISE_ENDPOINT, CONFIG_KEYS.OCO_FLOWISE_ENDPOINT,
typeof value === 'string' && value.includes(':'), typeof value === 'string' && value.includes(':'),
'Value must be string and should include both I.P. and port number' // Considering the possibility of DNS lookup or feeding the I.P. explicitely, there is no pattern to verify, except a column for the port number 'Value must be string and should include both I.P. and port number' // Considering the possibility of DNS lookup or feeding the I.P. explicitly, there is no pattern to verify, except a column for the port number
); );
return value; return value;
@@ -372,82 +346,68 @@ export const configValidators = {
}, },
[CONFIG_KEYS.OCO_OLLAMA_API_URL](value: any) { [CONFIG_KEYS.OCO_OLLAMA_API_URL](value: any) {
// add simple api validator
validateConfig( validateConfig(
CONFIG_KEYS.OCO_API_URL, CONFIG_KEYS.OCO_OLLAMA_API_URL,
typeof value === 'string' && value.startsWith('http'), typeof value === 'string' && value.startsWith('http'),
`${value} is not a valid URL` `${value} is not a valid URL. It should start with 'http://' or 'https://'.`
); );
return value; return value;
} }
}; };
export enum OCO_AI_PROVIDER_ENUM {
OLLAMA = 'ollama',
OPENAI = 'openai',
ANTHROPIC = 'anthropic',
GEMINI = 'gemini',
AZURE = 'azure',
TEST = 'test',
FLOWISE = 'flowise'
}
export type ConfigType = { export type ConfigType = {
[key in CONFIG_KEYS]?: any; [CONFIG_KEYS.OCO_OPENAI_API_KEY]?: string;
[CONFIG_KEYS.OCO_ANTHROPIC_API_KEY]?: string;
[CONFIG_KEYS.OCO_AZURE_API_KEY]?: string;
[CONFIG_KEYS.OCO_GEMINI_API_KEY]?: string;
[CONFIG_KEYS.OCO_GEMINI_BASE_PATH]?: string;
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT]: number;
[CONFIG_KEYS.OCO_OPENAI_BASE_PATH]?: string;
[CONFIG_KEYS.OCO_DESCRIPTION]: boolean;
[CONFIG_KEYS.OCO_EMOJI]: boolean;
[CONFIG_KEYS.OCO_MODEL]: string;
[CONFIG_KEYS.OCO_LANGUAGE]: string;
[CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER]: string;
[CONFIG_KEYS.OCO_PROMPT_MODULE]: OCO_PROMPT_MODULE_ENUM;
[CONFIG_KEYS.OCO_AI_PROVIDER]: OCO_AI_PROVIDER_ENUM;
[CONFIG_KEYS.OCO_GITPUSH]: boolean;
[CONFIG_KEYS.OCO_ONE_LINE_COMMIT]: boolean;
[CONFIG_KEYS.OCO_AZURE_ENDPOINT]?: string;
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE]: string;
[CONFIG_KEYS.OCO_API_URL]?: string;
[CONFIG_KEYS.OCO_OLLAMA_API_URL]?: string;
[CONFIG_KEYS.OCO_FLOWISE_ENDPOINT]: string;
[CONFIG_KEYS.OCO_FLOWISE_API_KEY]?: string;
}; };
const defaultConfigPath = pathJoin(homedir(), '.opencommit'); const defaultConfigPath = pathJoin(homedir(), '.opencommit');
const defaultEnvPath = pathResolve(process.cwd(), '.env'); const defaultEnvPath = pathResolve(process.cwd(), '.env');
export const getConfig = ({
configPath = defaultConfigPath,
envPath = defaultEnvPath
}: {
configPath?: string;
envPath?: string;
} = {}): ConfigType | null => {
dotenv.config({ path: envPath });
const configFromEnv = {
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
OCO_ANTHROPIC_API_KEY: process.env.OCO_ANTHROPIC_API_KEY,
OCO_AZURE_API_KEY: process.env.OCO_AZURE_API_KEY,
OCO_GEMINI_API_KEY: process.env.OCO_GEMINI_API_KEY,
OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT
? Number(process.env.OCO_TOKENS_MAX_INPUT)
: undefined,
OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT
? Number(process.env.OCO_TOKENS_MAX_OUTPUT)
: undefined,
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
OCO_GEMINI_BASE_PATH: process.env.OCO_GEMINI_BASE_PATH,
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === 'true' ? true : false,
OCO_EMOJI: process.env.OCO_EMOJI === 'true' ? true : false,
OCO_MODEL:
process.env.OCO_MODEL || getDefaultModel(process.env.OCO_AI_PROVIDER),
OCO_LANGUAGE: process.env.OCO_LANGUAGE || 'en',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER || '$msg',
OCO_PROMPT_MODULE: process.env.OCO_PROMPT_MODULE || 'conventional-commit',
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER || 'openai',
OCO_GITPUSH: process.env.OCO_GITPUSH === 'false' ? false : true,
OCO_ONE_LINE_COMMIT:
process.env.OCO_ONE_LINE_COMMIT === 'true' ? true : false,
OCO_AZURE_ENDPOINT: process.env.OCO_AZURE_ENDPOINT || undefined,
OCO_TEST_MOCK_TYPE: process.env.OCO_TEST_MOCK_TYPE || 'commit-message',
OCO_FLOWISE_ENDPOINT: process.env.OCO_FLOWISE_ENDPOINT || ':',
OCO_FLOWISE_API_KEY: process.env.OCO_FLOWISE_API_KEY || undefined,
OCO_OLLAMA_API_URL: process.env.OCO_OLLAMA_API_URL || undefined
};
const configExists = existsSync(configPath);
if (!configExists) return configFromEnv;
const configFile = readFileSync(configPath, 'utf8'); const assertConfigsAreValid = (config: Record<string, any>) => {
const config = iniParse(configFile); for (const [key, value] of Object.entries(config)) {
if (!value) continue;
for (const configKey of Object.keys(config)) { if (typeof value === 'string' && ['null', 'undefined'].includes(value)) {
if (['null', 'undefined'].includes(config[configKey])) { config[key] = undefined;
config[configKey] = undefined;
continue; continue;
} }
try {
const validator = configValidators[configKey as CONFIG_KEYS];
const validValue = validator(
config[configKey] ?? configFromEnv[configKey as CONFIG_KEYS],
config
);
config[configKey] = validValue; try {
const validate = configValidators[key as CONFIG_KEYS];
validate(value, config);
} catch (error) { } catch (error) {
outro(`Unknown '${configKey}' config option or missing validator.`); outro(`Unknown '${key}' config option or missing validator.`);
outro( outro(
`Manually fix the '.env' file or global '~/.opencommit' config file.` `Manually fix the '.env' file or global '~/.opencommit' config file.`
); );
@@ -455,6 +415,100 @@ export const getConfig = ({
process.exit(1); process.exit(1);
} }
} }
};
enum OCO_PROMPT_MODULE_ENUM {
CONVENTIONAL_COMMIT = 'conventional-commit',
COMMITLINT = '@commitlint'
}
const initGlobalConfig = () => {
const defaultConfig = {
OCO_TOKENS_MAX_INPUT: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT,
OCO_TOKENS_MAX_OUTPUT: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT,
OCO_DESCRIPTION: false,
OCO_EMOJI: false,
OCO_MODEL: getDefaultModel('openai'),
OCO_LANGUAGE: 'en',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: '$msg',
OCO_PROMPT_MODULE: OCO_PROMPT_MODULE_ENUM.CONVENTIONAL_COMMIT,
OCO_AI_PROVIDER: OCO_AI_PROVIDER_ENUM.OPENAI,
OCO_ONE_LINE_COMMIT: false,
OCO_TEST_MOCK_TYPE: 'commit-message',
OCO_FLOWISE_ENDPOINT: ':',
OCO_GITPUSH: true // todo: deprecate
};
writeFileSync(defaultConfigPath, iniStringify(defaultConfig), 'utf8');
return defaultConfig;
};
const parseEnvVarValue = (value?: any) => {
try {
return JSON.parse(value);
} catch (error) {
return value;
}
};
export const getConfig = ({
configPath = defaultConfigPath,
envPath = defaultEnvPath
}: {
configPath?: string;
envPath?: string;
} = {}): ConfigType => {
dotenv.config({ path: envPath });
const envConfig = {
OCO_MODEL: process.env.OCO_MODEL,
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
OCO_ANTHROPIC_API_KEY: process.env.OCO_ANTHROPIC_API_KEY,
OCO_AZURE_API_KEY: process.env.OCO_AZURE_API_KEY,
OCO_GEMINI_API_KEY: process.env.OCO_GEMINI_API_KEY,
OCO_FLOWISE_API_KEY: process.env.OCO_FLOWISE_API_KEY,
OCO_TOKENS_MAX_INPUT: parseEnvVarValue(process.env.OCO_TOKENS_MAX_INPUT),
OCO_TOKENS_MAX_OUTPUT: parseEnvVarValue(process.env.OCO_TOKENS_MAX_OUTPUT),
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
OCO_GEMINI_BASE_PATH: process.env.OCO_GEMINI_BASE_PATH,
OCO_AZURE_ENDPOINT: process.env.OCO_AZURE_ENDPOINT,
OCO_FLOWISE_ENDPOINT: process.env.OCO_FLOWISE_ENDPOINT,
OCO_OLLAMA_API_URL: process.env.OCO_OLLAMA_API_URL,
OCO_DESCRIPTION: parseEnvVarValue(process.env.OCO_DESCRIPTION),
OCO_EMOJI: parseEnvVarValue(process.env.OCO_EMOJI),
OCO_LANGUAGE: process.env.OCO_LANGUAGE,
OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER,
OCO_PROMPT_MODULE: process.env.OCO_PROMPT_MODULE as OCO_PROMPT_MODULE_ENUM,
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM,
OCO_ONE_LINE_COMMIT: parseEnvVarValue(process.env.OCO_ONE_LINE_COMMIT),
OCO_TEST_MOCK_TYPE: process.env.OCO_TEST_MOCK_TYPE,
OCO_GITPUSH: parseEnvVarValue(process.env.OCO_GITPUSH) // todo: deprecate
};
let globalConfig: ConfigType;
const isGlobalConfigFileExist = existsSync(configPath);
if (!isGlobalConfigFileExist) globalConfig = initGlobalConfig();
else {
const configFile = readFileSync(configPath, 'utf8');
globalConfig = iniParse(configFile) as ConfigType;
}
const mergeObjects = (main: Partial<ConfigType>, fallback: ConfigType) =>
Object.keys(CONFIG_KEYS).reduce((acc, key) => {
acc[key] = parseEnvVarValue(main[key] ?? fallback[key]);
return acc;
}, {} as ConfigType);
// env config takes precedence over global ~/.opencommit config file
const config = mergeObjects(envConfig, globalConfig);
return config; return config;
}; };
@@ -463,29 +517,37 @@ export const setConfig = (
keyValues: [key: string, value: string][], keyValues: [key: string, value: string][],
configPath: string = defaultConfigPath configPath: string = defaultConfigPath
) => { ) => {
const config = getConfig() || {}; const config = getConfig();
for (const [configKey, configValue] of keyValues) { for (let [key, value] of keyValues) {
if (!configValidators.hasOwnProperty(configKey)) { if (!configValidators.hasOwnProperty(key)) {
throw new Error(`Unsupported config key: ${configKey}`); const supportedKeys = Object.keys(configValidators).join('\n');
throw new Error(
`Unsupported config key: ${key}. Expected keys are:\n\n${supportedKeys}.\n\nFor more help refer to our docs: https://github.com/di-sukharev/opencommit`
);
} }
let parsedConfigValue; let parsedConfigValue;
try { try {
parsedConfigValue = JSON.parse(configValue); parsedConfigValue = JSON.parse(value);
} catch (error) { } catch (error) {
parsedConfigValue = configValue; parsedConfigValue = value;
} }
const validValue = const validValue = configValidators[key as CONFIG_KEYS](
configValidators[configKey as CONFIG_KEYS](parsedConfigValue); parsedConfigValue,
config[configKey as CONFIG_KEYS] = validValue; config
);
config[key] = validValue;
} }
writeFileSync(configPath, iniStringify(config), 'utf8'); writeFileSync(configPath, iniStringify(config), 'utf8');
outro(`${chalk.green('✔')} Config successfully set`); assertConfigsAreValid(config);
outro(`${chalk.green('✔')} config successfully set`);
}; };
export const configCommand = command( export const configCommand = command(
@@ -494,9 +556,9 @@ export const configCommand = command(
parameters: ['<mode>', '<key=values...>'] parameters: ['<mode>', '<key=values...>']
}, },
async (argv) => { async (argv) => {
intro('opencommit — config');
try { try {
const { mode, keyValues } = argv._; const { mode, keyValues } = argv._;
intro(`COMMAND: config ${mode} ${keyValues}`);
if (mode === CONFIG_MODES.get) { if (mode === CONFIG_MODES.get) {
const config = getConfig() || {}; const config = getConfig() || {};

View File

@@ -1,13 +1,11 @@
import { intro, outro } from '@clack/prompts';
import chalk from 'chalk'; import chalk from 'chalk';
import { command } from 'cleye'; import { command } from 'cleye';
import { existsSync } from 'fs'; import { existsSync } from 'fs';
import fs from 'fs/promises'; import fs from 'fs/promises';
import path from 'path'; import path from 'path';
import { intro, outro } from '@clack/prompts';
import { COMMANDS } from '../CommandsEnum.js';
import { assertGitRepo, getCoreHooksPath } from '../utils/git.js'; import { assertGitRepo, getCoreHooksPath } from '../utils/git.js';
import { COMMANDS } from './ENUMS';
const HOOK_NAME = 'prepare-commit-msg'; const HOOK_NAME = 'prepare-commit-msg';
const DEFAULT_SYMLINK_URL = path.join('.git', 'hooks', HOOK_NAME); const DEFAULT_SYMLINK_URL = path.join('.git', 'hooks', HOOK_NAME);
@@ -94,7 +92,7 @@ export const hookCommand = command(
} }
throw new Error( throw new Error(
`Unsupported mode: ${mode}. Supported modes are: 'set' or 'unset', do: \`oco hook set\`` `Unsupported mode: ${mode}. Supported modes are: 'set' or 'unset'. Run: \`oco hook set\``
); );
} catch (error) { } catch (error) {
outro(`${chalk.red('✖')} ${error}`); outro(`${chalk.red('✖')} ${error}`);

View File

@@ -39,7 +39,11 @@ export const prepareCommitMessageHook = async (
const config = getConfig(); const config = getConfig();
if (!config?.OCO_OPENAI_API_KEY && !config?.OCO_ANTHROPIC_API_KEY && !config?.OCO_AZURE_API_KEY) { if (
!config.OCO_OPENAI_API_KEY &&
!config.OCO_ANTHROPIC_API_KEY &&
!config.OCO_AZURE_API_KEY
) {
throw new Error( throw new Error(
'No OPEN_AI_API or OCO_ANTHROPIC_API_KEY or OCO_AZURE_API_KEY exists. Set your key in ~/.opencommit' 'No OPEN_AI_API or OCO_ANTHROPIC_API_KEY or OCO_AZURE_API_KEY exists. Set your key in ~/.opencommit'
); );

View File

@@ -1,7 +1,28 @@
import { ChatCompletionRequestMessage } from 'openai'; import AnthropicClient from '@anthropic-ai/sdk';
import { OpenAIClient as AzureOpenAIClient } from '@azure/openai';
import { GoogleGenerativeAI as GeminiClient } from '@google/generative-ai';
import { AxiosInstance as RawAxiosClient } from 'axios';
import { OpenAI as OpenAIClient } from 'openai';
export interface AiEngineConfig {
apiKey: string;
model: string;
maxTokensOutput: number;
maxTokensInput: number;
baseURL?: string;
}
type Client =
| OpenAIClient
| AzureOpenAIClient
| AnthropicClient
| RawAxiosClient
| GeminiClient;
export interface AiEngine { export interface AiEngine {
config: AiEngineConfig;
client: Client;
generateCommitMessage( generateCommitMessage(
messages: Array<ChatCompletionRequestMessage> messages: Array<OpenAIClient.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined>; ): Promise<string | null | undefined>;
} }

View File

@@ -1,104 +1,62 @@
import AnthropicClient from '@anthropic-ai/sdk';
import {
MessageCreateParamsNonStreaming,
MessageParam
} from '@anthropic-ai/sdk/resources/messages.mjs';
import { outro } from '@clack/prompts';
import axios from 'axios'; import axios from 'axios';
import chalk from 'chalk'; import chalk from 'chalk';
import { OpenAI } from 'openai';
import Anthropic from '@anthropic-ai/sdk';
import {ChatCompletionRequestMessage} from 'openai'
import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources';
import { intro, outro } from '@clack/prompts';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
getConfig
} from '../commands/config';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff'; import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount'; import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine'; import { AiEngine, AiEngineConfig } from './Engine';
import { MODEL_LIST } from '../commands/config';
const config = getConfig(); interface AnthropicConfig extends AiEngineConfig {}
const MAX_TOKENS_OUTPUT = export class AnthropicEngine implements AiEngine {
config?.OCO_TOKENS_MAX_OUTPUT || config: AnthropicConfig;
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT; client: AnthropicClient;
const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
let provider = config?.OCO_AI_PROVIDER; constructor(config) {
let apiKey = config?.OCO_ANTHROPIC_API_KEY; this.config = config;
const [command, mode] = process.argv.slice(2); this.client = new AnthropicClient({ apiKey: this.config.apiKey });
if (
provider === 'anthropic' &&
!apiKey &&
command !== 'config' &&
mode !== CONFIG_MODES.set
) {
intro('opencommit');
outro(
'OCO_ANTHROPIC_API_KEY is not set, please run `oco config set OCO_ANTHROPIC_API_KEY=<your token> . If you are using Claude, make sure you add payment details, so API works.`'
);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
}
const MODEL = config?.OCO_MODEL;
if (provider === 'anthropic' &&
typeof MODEL !== 'string' &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
outro(
`${chalk.red('✖')} Unsupported model ${MODEL}. The model can be any string, but the current configuration is not supported.`
);
process.exit(1);
}
export class AnthropicAi implements AiEngine {
private anthropicAiApiConfiguration = {
apiKey: apiKey
};
private anthropicAI!: Anthropic;
constructor() {
this.anthropicAI = new Anthropic(this.anthropicAiApiConfiguration);
} }
public generateCommitMessage = async ( public generateCommitMessage = async (
messages: Array<ChatCompletionRequestMessage> messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> => { ): Promise<string | undefined> => {
const systemMessage = messages.find((msg) => msg.role === 'system')
const systemMessage = messages.find(msg => msg.role === 'system')?.content as string; ?.content as string;
const restMessages = messages.filter((msg) => msg.role !== 'system') as MessageParam[]; const restMessages = messages.filter(
(msg) => msg.role !== 'system'
) as MessageParam[];
const params: MessageCreateParamsNonStreaming = { const params: MessageCreateParamsNonStreaming = {
model: MODEL, model: this.config.model,
system: systemMessage, system: systemMessage,
messages: restMessages, messages: restMessages,
temperature: 0, temperature: 0,
top_p: 0.1, top_p: 0.1,
max_tokens: MAX_TOKENS_OUTPUT max_tokens: this.config.maxTokensOutput
}; };
try { try {
const REQUEST_TOKENS = messages const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content as string) + 4) .map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0); .reduce((a, b) => a + b, 0);
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) { if (
REQUEST_TOKENS >
this.config.maxTokensInput - this.config.maxTokensOutput
) {
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens); throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
} }
const data = await this.anthropicAI.messages.create(params); const data = await this.client.messages.create(params);
const message = data?.content[0].text; const message = data?.content[0].text;
return message; return message;
} catch (error) { } catch (error) {
outro(`${chalk.red('✖')} ${JSON.stringify(params)}`);
const err = error as Error; const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`); outro(`${chalk.red('✖')} ${err?.message || err}`);

View File

@@ -1,81 +1,51 @@
import {
AzureKeyCredential,
OpenAIClient as AzureOpenAIClient
} from '@azure/openai';
import { outro } from '@clack/prompts';
import axios from 'axios'; import axios from 'axios';
import chalk from 'chalk'; import chalk from 'chalk';
import { execa } from 'execa'; import { OpenAI } from 'openai';
import {
ChatCompletionRequestMessage,
} from 'openai';
import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
import { intro, outro } from '@clack/prompts';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
getConfig
} from '../commands/config';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff'; import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount'; import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine'; import { AiEngine, AiEngineConfig } from './Engine';
const config = getConfig(); interface AzureAiEngineConfig extends AiEngineConfig {
baseURL: string;
const MAX_TOKENS_OUTPUT = apiKey: string;
config?.OCO_TOKENS_MAX_OUTPUT ||
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT;
const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
let basePath = config?.OCO_OPENAI_BASE_PATH;
let apiKey = config?.OCO_AZURE_API_KEY;
let apiEndpoint = config?.OCO_AZURE_ENDPOINT;
const [command, mode] = process.argv.slice(2);
const provider = config?.OCO_AI_PROVIDER;
if (
provider === 'azure' &&
!apiKey &&
!apiEndpoint &&
command !== 'config' &&
mode !== CONFIG_MODES.set
) {
intro('opencommit');
outro(
'OCO_AZURE_API_KEY or OCO_AZURE_ENDPOINT are not set, please run `oco config set OCO_AZURE_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.`'
);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
} }
const MODEL = config?.OCO_MODEL || 'gpt-3.5-turbo'; export class AzureEngine implements AiEngine {
config: AzureAiEngineConfig;
client: AzureOpenAIClient;
export class Azure implements AiEngine { constructor(config: AzureAiEngineConfig) {
private openAI!: OpenAIClient; this.config = config;
this.client = new AzureOpenAIClient(
constructor() { this.config.baseURL,
if (provider === 'azure') { new AzureKeyCredential(this.config.apiKey)
this.openAI = new OpenAIClient(apiEndpoint, new AzureKeyCredential(apiKey)); );
}
} }
public generateCommitMessage = async ( generateCommitMessage = async (
messages: Array<ChatCompletionRequestMessage> messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> => { ): Promise<string | undefined> => {
try { try {
const REQUEST_TOKENS = messages const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content) + 4) .map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0); .reduce((a, b) => a + b, 0);
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) { if (
REQUEST_TOKENS >
this.config.maxTokensInput - this.config.maxTokensOutput
) {
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens); throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
} }
const data = await this.openAI.getChatCompletions(MODEL, messages); const data = await this.client.getChatCompletions(
this.config.model,
messages
);
const message = data.choices[0].message; const message = data.choices[0].message;
@@ -84,10 +54,10 @@ export class Azure implements AiEngine {
} }
return message?.content; return message?.content;
} catch (error) { } catch (error) {
outro(`${chalk.red('✖')} ${MODEL}`); outro(`${chalk.red('✖')} ${this.config.model}`);
const err = error as Error; const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`); outro(`${chalk.red('✖')} ${JSON.stringify(error)}`);
if ( if (
axios.isAxiosError<{ error?: { message: string } }>(error) && axios.isAxiosError<{ error?: { message: string } }>(error) &&
@@ -105,5 +75,3 @@ export class Azure implements AiEngine {
} }
}; };
} }
export const azure = new Azure();

View File

@@ -1,38 +1,40 @@
import axios, { AxiosError } from 'axios'; import axios, { AxiosInstance } from 'axios';
import { ChatCompletionRequestMessage } from 'openai'; import { OpenAI } from 'openai';
import { AiEngine } from './Engine'; import { AiEngine, AiEngineConfig } from './Engine';
import { interface FlowiseAiConfig extends AiEngineConfig {}
getConfig
} from '../commands/config';
const config = getConfig();
export class FlowiseAi implements AiEngine { export class FlowiseAi implements AiEngine {
config: FlowiseAiConfig;
client: AxiosInstance;
constructor(config) {
this.config = config;
this.client = axios.create({
url: `${config.baseURL}/${config.apiKey}`,
headers: { 'Content-Type': 'application/json' }
});
}
async generateCommitMessage( async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage> messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> { ): Promise<string | undefined> {
const gitDiff = (messages[messages.length - 1]?.content as string)
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\n/g, '\\n')
.replace(/\r/g, '\\r')
.replace(/\t/g, '\\t');
const gitDiff = messages[ messages.length - 1 ]?.content?.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\n/g, '\\n')
.replace(/\r/g, '\\r')
.replace(/\t/g, '\\t');
const url = `http://${config?.OCO_FLOWISE_ENDPOINT}/api/v1/prediction/${config?.OCO_FLOWISE_API_KEY}`;
const payload = { const payload = {
question : gitDiff, question: gitDiff,
overrideConfig : { overrideConfig: {
systemMessagePrompt: messages[0]?.content, systemMessagePrompt: messages[0]?.content
}, },
history : messages.slice( 1, -1 ) history: messages.slice(1, -1)
} };
try { try {
const response = await axios.post(url, payload, { const response = await this.client.post('', payload);
headers: {
'Content-Type': 'application/json'
}
});
const message = response.data; const message = response.data;
return message?.text; return message?.text;
} catch (err: any) { } catch (err: any) {

View File

@@ -1,54 +1,55 @@
import { ChatCompletionRequestMessage } from 'openai'; import {
import { AiEngine } from './Engine'; Content,
import { Content, GenerativeModel, GoogleGenerativeAI, HarmBlockThreshold, HarmCategory, Part } from '@google/generative-ai'; GoogleGenerativeAI,
import { CONFIG_MODES, ConfigType, DEFAULT_TOKEN_LIMITS, getConfig, MODEL_LIST } from '../commands/config'; HarmBlockThreshold,
import { intro, outro } from '@clack/prompts'; HarmCategory,
import chalk from 'chalk'; Part
} from '@google/generative-ai';
import axios from 'axios'; import axios from 'axios';
import { OpenAI } from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';
interface GeminiConfig extends AiEngineConfig {}
export class Gemini implements AiEngine { export class Gemini implements AiEngine {
config: GeminiConfig;
client: GoogleGenerativeAI;
private readonly config: ConfigType; constructor(config) {
private readonly googleGenerativeAi: GoogleGenerativeAI; this.client = new GoogleGenerativeAI(config.apiKey);
private ai: GenerativeModel; this.config = config;
// vars
private maxTokens = {
input: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT,
output: DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT
};
private basePath: string;
private apiKey: string;
private model: string;
constructor() {
this.config = getConfig() as ConfigType;
this.googleGenerativeAi = new GoogleGenerativeAI(this.config.OCO_GEMINI_API_KEY);
this.warmup();
} }
async generateCommitMessage(messages: ChatCompletionRequestMessage[]): Promise<string | undefined> { async generateCommitMessage(
const systemInstruction = messages.filter(m => m.role === 'system') messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
.map(m => m.content) ): Promise<string | undefined> {
const systemInstruction = messages
.filter((m) => m.role === 'system')
.map((m) => m.content)
.join('\n'); .join('\n');
this.ai = this.googleGenerativeAi.getGenerativeModel({ const gemini = this.client.getGenerativeModel({
model: this.model, model: this.config.model,
systemInstruction, systemInstruction
}); });
const contents = messages.filter(m => m.role !== 'system') const contents = messages
.map(m => ({ parts: [{ text: m.content } as Part], role: m.role == 'user' ? m.role : 'model', } as Content)); .filter((m) => m.role !== 'system')
.map(
(m) =>
({
parts: [{ text: m.content } as Part],
role: m.role === 'user' ? m.role : 'model'
} as Content)
);
try { try {
const result = await this.ai.generateContent({ const result = await gemini.generateContent({
contents, contents,
safetySettings: [ safetySettings: [
{ {
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
}, },
{ {
category: HarmCategory.HARM_CATEGORY_HARASSMENT, category: HarmCategory.HARM_CATEGORY_HARASSMENT,
@@ -61,73 +62,27 @@ export class Gemini implements AiEngine {
{ {
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
}, }
], ],
generationConfig: { generationConfig: {
maxOutputTokens: this.maxTokens.output, maxOutputTokens: this.config.maxTokensOutput,
temperature: 0, temperature: 0,
topP: 0.1, topP: 0.1
}, }
}); });
return result.response.text(); return result.response.text();
} catch (error) { } catch (error) {
const err = error as Error; const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
if ( if (
axios.isAxiosError<{ error?: { message: string } }>(error) && axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401 error.response?.status === 401
) { ) {
const geminiError = error.response.data.error; const geminiError = error.response.data.error;
if (geminiError) throw new Error(geminiError?.message);
if (geminiError?.message) outro(geminiError.message);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
} }
throw err; throw err;
} }
} }
private warmup(): void {
if (this.config.OCO_TOKENS_MAX_INPUT !== undefined) this.maxTokens.input = this.config.OCO_TOKENS_MAX_INPUT;
if (this.config.OCO_TOKENS_MAX_OUTPUT !== undefined) this.maxTokens.output = this.config.OCO_TOKENS_MAX_OUTPUT;
this.basePath = this.config.OCO_GEMINI_BASE_PATH;
this.apiKey = this.config.OCO_GEMINI_API_KEY;
const [command, mode] = process.argv.slice(2);
const provider = this.config.OCO_AI_PROVIDER;
if (provider === 'gemini' && !this.apiKey &&
command !== 'config' && mode !== 'set') {
intro('opencommit');
outro('OCO_GEMINI_API_KEY is not set, please run `oco config set OCO_GEMINI_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.');
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
}
this.model = this.config.OCO_MODEL || MODEL_LIST.gemini[0];
if (provider === 'gemini' &&
!MODEL_LIST.gemini.includes(this.model) &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
outro(
`${chalk.red('✖')} Unsupported model ${this.model} for Gemini. Supported models are: ${MODEL_LIST.gemini.join(
', '
)}`
);
process.exit(1);
}
}
} }

View File

@@ -1,52 +1,41 @@
import axios, { AxiosError } from 'axios'; import axios, { AxiosInstance } from 'axios';
import { ChatCompletionRequestMessage } from 'openai'; import { OpenAI } from 'openai';
import { AiEngine } from './Engine'; import { AiEngine, AiEngineConfig } from './Engine';
import { interface OllamaConfig extends AiEngineConfig {}
getConfig
} from '../commands/config';
const config = getConfig();
export class OllamaAi implements AiEngine { export class OllamaAi implements AiEngine {
private model = "mistral"; // as default model of Ollama config: OllamaConfig;
private url = "http://localhost:11434/api/chat"; // default URL of Ollama API client: AxiosInstance;
setModel(model: string) { constructor(config) {
this.model = model ?? config?.OCO_MODEL ?? 'mistral'; this.config = config;
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:11434/api/chat',
headers: { 'Content-Type': 'application/json' }
});
} }
setUrl(url: string) {
this.url = url ?? config?.OCO_OLLAMA_API_URL ?? 'http://localhost:11434/api/chat';
}
async generateCommitMessage( async generateCommitMessage(
messages: Array<ChatCompletionRequestMessage> messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> { ): Promise<string | undefined> {
const model = this.model; const params = {
model: this.config.model ?? 'mistral',
//console.log(messages);
//process.exit()
const url = this.url;
const p = {
model,
messages, messages,
options: { temperature: 0, top_p: 0.1 }, options: { temperature: 0, top_p: 0.1 },
stream: false stream: false
}; };
try { try {
const response = await axios.post(url, p, { const response = await this.client.post('', params);
headers: {
'Content-Type': 'application/json'
}
});
const message = response.data.message; const message = response.data.message;
return message?.content; return message?.content;
} catch (err: any) { } catch (err: any) {
const message = err.response?.data?.error ?? err.message; const message = err.response?.data?.error ?? err.message;
throw new Error('local model issues. details: ' + message); throw new Error(`Ollama provider error: ${message}`);
} }
} }
} }

View File

@@ -1,127 +1,59 @@
import axios from 'axios'; import axios from 'axios';
import chalk from 'chalk'; import { OpenAI } from 'openai';
import { execa } from 'execa';
import {
ChatCompletionRequestMessage,
Configuration as OpenAiApiConfiguration,
OpenAIApi
} from 'openai';
import { intro, outro } from '@clack/prompts';
import {
CONFIG_MODES,
DEFAULT_TOKEN_LIMITS,
getConfig
} from '../commands/config';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff'; import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { tokenCount } from '../utils/tokenCount'; import { tokenCount } from '../utils/tokenCount';
import { AiEngine } from './Engine'; import { AiEngine, AiEngineConfig } from './Engine';
import { MODEL_LIST } from '../commands/config';
const config = getConfig(); interface OpenAiConfig extends AiEngineConfig {}
const MAX_TOKENS_OUTPUT = export class OpenAiEngine implements AiEngine {
config?.OCO_TOKENS_MAX_OUTPUT || config: OpenAiConfig;
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT; client: OpenAI;
const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
let basePath = config?.OCO_OPENAI_BASE_PATH;
let apiKey = config?.OCO_OPENAI_API_KEY;
const [command, mode] = process.argv.slice(2); constructor(config: OpenAiConfig) {
this.config = config;
const provider = config?.OCO_AI_PROVIDER; this.client = new OpenAI({ apiKey: config.apiKey });
if (
provider === 'openai' &&
!apiKey &&
command !== 'config' &&
mode !== CONFIG_MODES.set
) {
intro('opencommit');
outro(
'OCO_OPENAI_API_KEY is not set, please run `oco config set OCO_OPENAI_API_KEY=<your token> . If you are using GPT, make sure you add payment details, so API works.`'
);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
process.exit(1);
}
const MODEL = config?.OCO_MODEL || 'gpt-3.5-turbo';
if (provider === 'openai' &&
typeof MODEL !== 'string' &&
command !== 'config' &&
mode !== CONFIG_MODES.set) {
outro(
`${chalk.red('✖')} Unsupported model ${MODEL}. The model can be any string, but the current configuration is not supported.`
);
process.exit(1);
}
export class OpenAi implements AiEngine {
private openAiApiConfiguration = new OpenAiApiConfiguration({
apiKey: apiKey
});
private openAI!: OpenAIApi;
constructor() {
if (basePath) {
this.openAiApiConfiguration.basePath = basePath;
}
this.openAI = new OpenAIApi(this.openAiApiConfiguration);
} }
public generateCommitMessage = async ( public generateCommitMessage = async (
messages: Array<ChatCompletionRequestMessage> messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> => { ): Promise<string | null> => {
const params = { const params = {
model: MODEL, model: this.config.model,
messages, messages,
temperature: 0, temperature: 0,
top_p: 0.1, top_p: 0.1,
max_tokens: MAX_TOKENS_OUTPUT max_tokens: this.config.maxTokensOutput
}; };
try { try {
const REQUEST_TOKENS = messages const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content as string) + 4) .map((msg) => tokenCount(msg.content as string) + 4)
.reduce((a, b) => a + b, 0); .reduce((a, b) => a + b, 0);
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) { if (
REQUEST_TOKENS >
this.config.maxTokensInput - this.config.maxTokensOutput
)
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens); throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
}
const { data } = await this.openAI.createChatCompletion(params); const completion = await this.client.chat.completions.create(params);
const message = data.choices[0].message; const message = completion.choices[0].message;
return message?.content; return message?.content;
} catch (error) { } catch (error) {
outro(`${chalk.red('✖')} ${JSON.stringify(params)}`);
const err = error as Error; const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
if ( if (
axios.isAxiosError<{ error?: { message: string } }>(error) && axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401 error.response?.status === 401
) { ) {
const openAiError = error.response.data.error; const openAiError = error.response.data.error;
if (openAiError?.message) outro(openAiError.message); if (openAiError) throw new Error(openAiError.message);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
} }
throw err; throw err;
} }
}; };
} }

View File

@@ -1,31 +1,47 @@
import { ChatCompletionRequestMessage } from 'openai'; import { OpenAI } from 'openai';
import { AiEngine } from './Engine'; import { AiEngine } from './Engine';
import { getConfig } from '../commands/config';
export const TEST_MOCK_TYPES = [ export const TEST_MOCK_TYPES = [
'commit-message', 'commit-message',
'prompt-module-commitlint-config', 'prompt-module-commitlint-config'
] as const ] as const;
type TestMockType = typeof TEST_MOCK_TYPES[number];
export type TestMockType = (typeof TEST_MOCK_TYPES)[number];
type TestAiEngine = Partial<AiEngine> & {
mockType: TestMockType;
};
export class TestAi implements TestAiEngine {
mockType: TestMockType;
// those are not used in the test engine
config: any;
client: any;
// ---
constructor(mockType: TestMockType) {
this.mockType = mockType;
}
export class TestAi implements AiEngine {
async generateCommitMessage( async generateCommitMessage(
_messages: Array<ChatCompletionRequestMessage> _messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> { ): Promise<string | undefined> {
const config = getConfig(); switch (this.mockType) {
switch (config?.OCO_TEST_MOCK_TYPE as TestMockType | undefined) {
case 'commit-message': case 'commit-message':
return 'fix(testAi.ts): test commit message'; return 'fix(testAi.ts): test commit message';
case 'prompt-module-commitlint-config': case 'prompt-module-commitlint-config':
return `{\n` + return (
`{\n` +
` "localLanguage": "english",\n` + ` "localLanguage": "english",\n` +
` "commitFix": "fix(server): Change 'port' variable to uppercase 'PORT'",\n` + ` "commitFix": "fix(server): Change 'port' variable to uppercase 'PORT'",\n` +
` "commitFeat": "feat(server): Allow server to listen on a port specified through environment variable",\n` + ` "commitFeat": "feat(server): Allow server to listen on a port specified through environment variable",\n` +
` "commitDescription": "Change 'port' variable to uppercase 'PORT'. Allow server to listen on a port specified through environment variable."\n` + ` "commitDescription": "Change 'port' variable to uppercase 'PORT'. Allow server to listen on a port specified through environment variable."\n` +
`}` `}`
);
default: default:
throw Error('unsupported test mock type') throw Error('unsupported test mock type');
} }
} }
} }

View File

@@ -1,31 +1,27 @@
import { import { OpenAI } from 'openai';
ChatCompletionRequestMessage,
ChatCompletionRequestMessageRoleEnum
} from 'openai';
import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config'; import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config';
import { getMainCommitPrompt } from './prompts'; import { getMainCommitPrompt } from './prompts';
import { getEngine } from './utils/engine';
import { mergeDiffs } from './utils/mergeDiffs'; import { mergeDiffs } from './utils/mergeDiffs';
import { tokenCount } from './utils/tokenCount'; import { tokenCount } from './utils/tokenCount';
import { getEngine } from './utils/engine';
const config = getConfig(); const config = getConfig();
const MAX_TOKENS_INPUT = const MAX_TOKENS_INPUT =
config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT; config.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
const MAX_TOKENS_OUTPUT = const MAX_TOKENS_OUTPUT =
config?.OCO_TOKENS_MAX_OUTPUT || config.OCO_TOKENS_MAX_OUTPUT ||
DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT; DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT;
const generateCommitMessageChatCompletionPrompt = async ( const generateCommitMessageChatCompletionPrompt = async (
diff: string, diff: string,
fullGitMojiSpec: boolean fullGitMojiSpec: boolean
): Promise<Array<ChatCompletionRequestMessage>> => { ): Promise<Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>> => {
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec); const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec);
const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT]; const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT];
chatContextAsCompletionRequest.push({ chatContextAsCompletionRequest.push({
role: ChatCompletionRequestMessageRoleEnum.User, role: 'user',
content: diff content: diff
}); });
@@ -43,7 +39,7 @@ const ADJUSTMENT_FACTOR = 20;
export const generateCommitMessageByDiff = async ( export const generateCommitMessageByDiff = async (
diff: string, diff: string,
fullGitMojiSpec: boolean fullGitMojiSpec: boolean = false
): Promise<string> => { ): Promise<string> => {
try { try {
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec); const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec);
@@ -181,7 +177,7 @@ export const getCommitMsgsPromisesFromFileDiffs = async (
// merge multiple files-diffs into 1 prompt to save tokens // merge multiple files-diffs into 1 prompt to save tokens
const mergedFilesDiffs = mergeDiffs(diffByFiles, maxDiffLength); const mergedFilesDiffs = mergeDiffs(diffByFiles, maxDiffLength);
const commitMessagePromises = [] as Promise<string | undefined>[]; const commitMessagePromises = [] as Promise<string | null | undefined>[];
for (const fileDiff of mergedFilesDiffs) { for (const fileDiff of mergedFilesDiffs) {
if (tokenCount(fileDiff) >= maxDiffLength) { if (tokenCount(fileDiff) >= maxDiffLength) {

View File

@@ -1,11 +1,9 @@
import { unlinkSync, writeFileSync } from 'fs';
import core from '@actions/core'; import core from '@actions/core';
import exec from '@actions/exec'; import exec from '@actions/exec';
import github from '@actions/github'; import github from '@actions/github';
import { intro, outro } from '@clack/prompts'; import { intro, outro } from '@clack/prompts';
import { PushEvent } from '@octokit/webhooks-types'; import { PushEvent } from '@octokit/webhooks-types';
import { unlinkSync, writeFileSync } from 'fs';
import { generateCommitMessageByDiff } from './generateCommitMessageFromGitDiff'; import { generateCommitMessageByDiff } from './generateCommitMessageFromGitDiff';
import { randomIntFromInterval } from './utils/randomIntFromInterval'; import { randomIntFromInterval } from './utils/randomIntFromInterval';
import { sleep } from './utils/sleep'; import { sleep } from './utils/sleep';
@@ -54,7 +52,7 @@ async function improveMessagesInChunks(diffsAndSHAs: DiffAndSHA[]) {
const chunkSize = diffsAndSHAs!.length % 2 === 0 ? 4 : 3; const chunkSize = diffsAndSHAs!.length % 2 === 0 ? 4 : 3;
outro(`Improving commit messages in chunks of ${chunkSize}.`); outro(`Improving commit messages in chunks of ${chunkSize}.`);
const improvePromises = diffsAndSHAs!.map((commit) => const improvePromises = diffsAndSHAs!.map((commit) =>
generateCommitMessageByDiff(commit.diff) generateCommitMessageByDiff(commit.diff, false)
); );
let improvedMessagesAndSHAs: MsgAndSHA[] = []; let improvedMessagesAndSHAs: MsgAndSHA[] = [];

View File

@@ -2,16 +2,16 @@ import { spinner } from '@clack/prompts';
import { getConfig } from '../../commands/config'; import { getConfig } from '../../commands/config';
import { i18n, I18nLocals } from '../../i18n'; import { i18n, I18nLocals } from '../../i18n';
import { getEngine } from '../../utils/engine';
import { COMMITLINT_LLM_CONFIG_PATH } from './constants'; import { COMMITLINT_LLM_CONFIG_PATH } from './constants';
import { computeHash } from './crypto'; import { computeHash } from './crypto';
import { commitlintPrompts, inferPromptsFromCommitlintConfig } from './prompts'; import { commitlintPrompts, inferPromptsFromCommitlintConfig } from './prompts';
import { getCommitLintPWDConfig } from './pwd-commitlint'; import { getCommitLintPWDConfig } from './pwd-commitlint';
import { CommitlintLLMConfig } from './types'; import { CommitlintLLMConfig } from './types';
import * as utils from './utils'; import * as utils from './utils';
import { getEngine } from '../../utils/engine';
const config = getConfig(); const config = getConfig();
const translation = i18n[(config?.OCO_LANGUAGE as I18nLocals) || 'en']; const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
export const configureCommitlintIntegration = async (force = false) => { export const configureCommitlintIntegration = async (force = false) => {
const spin = spinner(); const spin = spinner();
@@ -26,7 +26,7 @@ export const configureCommitlintIntegration = async (force = false) => {
* @commitlint >= 9.0.0 is installed in the local directory. * @commitlint >= 9.0.0 is installed in the local directory.
* 'node_modules/@commitlint/load' package exists. * 'node_modules/@commitlint/load' package exists.
* A valid @commitlint configuration exists. * A valid @commitlint configuration exists.
`, `
); );
} }

View File

@@ -1,8 +1,5 @@
import chalk from 'chalk'; import chalk from 'chalk';
import { import { OpenAI } from 'openai';
ChatCompletionRequestMessage,
ChatCompletionRequestMessageRoleEnum
} from 'openai';
import { outro } from '@clack/prompts'; import { outro } from '@clack/prompts';
import { import {
@@ -17,7 +14,7 @@ import { i18n, I18nLocals } from '../../i18n';
import { IDENTITY, INIT_DIFF_PROMPT } from '../../prompts'; import { IDENTITY, INIT_DIFF_PROMPT } from '../../prompts';
const config = getConfig(); const config = getConfig();
const translation = i18n[(config?.OCO_LANGUAGE as I18nLocals) || 'en']; const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
type DeepPartial<T> = { type DeepPartial<T> = {
[P in keyof T]?: { [P in keyof T]?: {
@@ -214,10 +211,9 @@ const STRUCTURE_OF_COMMIT = `
// Prompt to generate LLM-readable rules based on @commitlint rules. // Prompt to generate LLM-readable rules based on @commitlint rules.
const GEN_COMMITLINT_CONSISTENCY_PROMPT = ( const GEN_COMMITLINT_CONSISTENCY_PROMPT = (
prompts: string[] prompts: string[]
): ChatCompletionRequestMessage[] => [ ): OpenAI.Chat.Completions.ChatCompletionMessageParam[] => [
{ {
role: ChatCompletionRequestMessageRoleEnum.Assistant, role: 'system',
// prettier-ignore
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages for two different changes in a single codebase and output them in the provided JSON format: one for a bug fix and another for a new feature. content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages for two different changes in a single codebase and output them in the provided JSON format: one for a bug fix and another for a new feature.
Here are the specific requirements and conventions that should be strictly followed: Here are the specific requirements and conventions that should be strictly followed:
@@ -260,22 +256,31 @@ Example Git Diff is to follow:`
const INIT_MAIN_PROMPT = ( const INIT_MAIN_PROMPT = (
language: string, language: string,
prompts: string[] prompts: string[]
): ChatCompletionRequestMessage => ({ ): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: ChatCompletionRequestMessageRoleEnum.System, role: 'system',
// prettier-ignore
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages in the given @commitlint convention and explain WHAT were the changes and WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you convert it into a commit message. content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages in the given @commitlint convention and explain WHAT were the changes and WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you convert it into a commit message.
${config?.OCO_EMOJI ? 'Use GitMoji convention to preface the commit.' : 'Do not preface the commit with anything.'} ${
${config?.OCO_DESCRIPTION ? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.' : "Don't add any descriptions to the commit, only commit message."} config.OCO_EMOJI
? 'Use GitMoji convention to preface the commit.'
: 'Do not preface the commit with anything.'
}
${
config.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message."
}
Use the present tense. Use ${language} to answer. Use the present tense. Use ${language} to answer.
${ config?.OCO_ONE_LINE_COMMIT ? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.' : ""} ${
config.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: ''
}
You will strictly follow the following conventions to generate the content of the commit message: You will strictly follow the following conventions to generate the content of the commit message:
- ${prompts.join('\n- ')} - ${prompts.join('\n- ')}
The conventions refers to the following structure of commit message: The conventions refers to the following structure of commit message:
${STRUCTURE_OF_COMMIT} ${STRUCTURE_OF_COMMIT}`
`
}); });
export const commitlintPrompts = { export const commitlintPrompts = {

View File

@@ -1,10 +1,5 @@
import {
ChatCompletionRequestMessage,
ChatCompletionRequestMessageRoleEnum
} from 'openai';
import { note } from '@clack/prompts'; import { note } from '@clack/prompts';
import { OpenAI } from 'openai';
import { getConfig } from './commands/config'; import { getConfig } from './commands/config';
import { i18n, I18nLocals } from './i18n'; import { i18n, I18nLocals } from './i18n';
import { configureCommitlintIntegration } from './modules/commitlint/config'; import { configureCommitlintIntegration } from './modules/commitlint/config';
@@ -14,118 +9,133 @@ import * as utils from './modules/commitlint/utils';
import { removeConventionalCommitWord } from './utils/removeConventionalCommitWord'; import { removeConventionalCommitWord } from './utils/removeConventionalCommitWord';
const config = getConfig(); const config = getConfig();
const translation = i18n[(config?.OCO_LANGUAGE as I18nLocals) || 'en']; const translation = i18n[(config.OCO_LANGUAGE as I18nLocals) || 'en'];
export const IDENTITY = export const IDENTITY =
'You are to act as the author of a commit message in git.'; 'You are to act as an author of a commit message in git.';
const GITMOJI_HELP = `Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description):
🐛, Fix a bug;
✨, Introduce new features;
📝, Add or update documentation;
🚀, Deploy stuff;
✅, Add, update, or pass tests;
♻️, Refactor code;
⬆️, Upgrade dependencies;
🔧, Add or update configuration files;
🌐, Internationalization and localization;
💡, Add or update comments in source code;`;
const FULL_GITMOJI_SPEC = `${GITMOJI_HELP}
🎨, Improve structure / format of the code;
⚡️, Improve performance;
🔥, Remove code or files;
🚑️, Critical hotfix;
💄, Add or update the UI and style files;
🎉, Begin a project;
🔒️, Fix security issues;
🔐, Add or update secrets;
🔖, Release / Version tags;
🚨, Fix compiler / linter warnings;
🚧, Work in progress;
💚, Fix CI Build;
⬇️, Downgrade dependencies;
📌, Pin dependencies to specific versions;
👷, Add or update CI build system;
📈, Add or update analytics or track code;
, Add a dependency;
, Remove a dependency;
🔨, Add or update development scripts;
✏️, Fix typos;
💩, Write bad code that needs to be improved;
⏪️, Revert changes;
🔀, Merge branches;
📦️, Add or update compiled files or packages;
👽️, Update code due to external API changes;
🚚, Move or rename resources (e.g.: files, paths, routes);
📄, Add or update license;
💥, Introduce breaking changes;
🍱, Add or update assets;
♿️, Improve accessibility;
🍻, Write code drunkenly;
💬, Add or update text and literals;
🗃️, Perform database related changes;
🔊, Add or update logs;
🔇, Remove logs;
👥, Add or update contributor(s);
🚸, Improve user experience / usability;
🏗️, Make architectural changes;
📱, Work on responsive design;
🤡, Mock things;
🥚, Add or update an easter egg;
🙈, Add or update a .gitignore file;
📸, Add or update snapshots;
⚗️, Perform experiments;
🔍️, Improve SEO;
🏷️, Add or update types;
🌱, Add or update seed files;
🚩, Add, update, or remove feature flags;
🥅, Catch errors;
💫, Add or update animations and transitions;
🗑️, Deprecate code that needs to be cleaned up;
🛂, Work on code related to authorization, roles and permissions;
🩹, Simple fix for a non-critical issue;
🧐, Data exploration/inspection;
⚰️, Remove dead code;
🧪, Add a failing test;
👔, Add or update business logic;
🩺, Add or update healthcheck;
🧱, Infrastructure related changes;
🧑‍💻, Improve developer experience;
💸, Add sponsorships or money related infrastructure;
🧵, Add or update code related to multithreading or concurrency;
🦺, Add or update code related to validation.`;
const CONVENTIONAL_COMMIT_KEYWORDS =
'Do not preface the commit with anything, except for the conventional commit keywords: fix, feat, build, chore, ci, docs, style, refactor, perf, test.';
const getCommitConvention = (fullGitMojiSpec: boolean) =>
config.OCO_EMOJI
? fullGitMojiSpec
? FULL_GITMOJI_SPEC
: GITMOJI_HELP
: CONVENTIONAL_COMMIT_KEYWORDS;
const getDescriptionInstruction = () =>
config.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message.";
const getOneLineCommitInstruction = () =>
config.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: '';
const INIT_MAIN_PROMPT = ( const INIT_MAIN_PROMPT = (
language: string, language: string,
fullGitMojiSpec: boolean fullGitMojiSpec: boolean
): ChatCompletionRequestMessage => ({ ): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: ChatCompletionRequestMessageRoleEnum.System, role: 'system',
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${ content: (() => {
fullGitMojiSpec ? 'GitMoji specification' : 'conventional commit convention' const commitConvention = fullGitMojiSpec
} and explain WHAT were the changes and mainly WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message. ? 'GitMoji specification'
${ : 'Conventional Commit Convention';
config?.OCO_EMOJI const missionStatement = `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${commitConvention} and explain WHAT were the changes and mainly WHY the changes were done.`;
? 'Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description): ' + const diffInstruction =
'🐛, Fix a bug; ' + "I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message.";
'✨, Introduce new features; ' + const conventionGuidelines = getCommitConvention(fullGitMojiSpec);
'📝, Add or update documentation; ' + const descriptionGuideline = getDescriptionInstruction();
'🚀, Deploy stuff; ' + const oneLineCommitGuideline = getOneLineCommitInstruction();
'✅, Add, update, or pass tests; ' + const generalGuidelines = `Use the present tense. Lines must not be longer than 74 characters. Use ${language} for the commit message.`;
'♻️, Refactor code; ' +
'⬆️, Upgrade dependencies; ' + return `${missionStatement}\n${diffInstruction}\n${conventionGuidelines}\n${descriptionGuideline}\n${oneLineCommitGuideline}\n${generalGuidelines}`;
'🔧, Add or update configuration files; ' + })()
'🌐, Internationalization and localization; ' +
'💡, Add or update comments in source code; ' +
`${
fullGitMojiSpec
? '🎨, Improve structure / format of the code; ' +
'⚡️, Improve performance; ' +
'🔥, Remove code or files; ' +
'🚑️, Critical hotfix; ' +
'💄, Add or update the UI and style files; ' +
'🎉, Begin a project; ' +
'🔒️, Fix security issues; ' +
'🔐, Add or update secrets; ' +
'🔖, Release / Version tags; ' +
'🚨, Fix compiler / linter warnings; ' +
'🚧, Work in progress; ' +
'💚, Fix CI Build; ' +
'⬇️, Downgrade dependencies; ' +
'📌, Pin dependencies to specific versions; ' +
'👷, Add or update CI build system; ' +
'📈, Add or update analytics or track code; ' +
', Add a dependency; ' +
', Remove a dependency; ' +
'🔨, Add or update development scripts; ' +
'✏️, Fix typos; ' +
'💩, Write bad code that needs to be improved; ' +
'⏪️, Revert changes; ' +
'🔀, Merge branches; ' +
'📦️, Add or update compiled files or packages; ' +
'👽️, Update code due to external API changes; ' +
'🚚, Move or rename resources (e.g.: files, paths, routes); ' +
'📄, Add or update license; ' +
'💥, Introduce breaking changes; ' +
'🍱, Add or update assets; ' +
'♿️, Improve accessibility; ' +
'🍻, Write code drunkenly; ' +
'💬, Add or update text and literals; ' +
'🗃️, Perform database related changes; ' +
'🔊, Add or update logs; ' +
'🔇, Remove logs; ' +
'👥, Add or update contributor(s); ' +
'🚸, Improve user experience / usability; ' +
'🏗️, Make architectural changes; ' +
'📱, Work on responsive design; ' +
'🤡, Mock things; ' +
'🥚, Add or update an easter egg; ' +
'🙈, Add or update a .gitignore file; ' +
'📸, Add or update snapshots; ' +
'⚗️, Perform experiments; ' +
'🔍️, Improve SEO; ' +
'🏷️, Add or update types; ' +
'🌱, Add or update seed files; ' +
'🚩, Add, update, or remove feature flags; ' +
'🥅, Catch errors; ' +
'💫, Add or update animations and transitions; ' +
'🗑️, Deprecate code that needs to be cleaned up; ' +
'🛂, Work on code related to authorization, roles and permissions; ' +
'🩹, Simple fix for a non-critical issue; ' +
'🧐, Data exploration/inspection; ' +
'⚰️, Remove dead code; ' +
'🧪, Add a failing test; ' +
'👔, Add or update business logic; ' +
'🩺, Add or update healthcheck; ' +
'🧱, Infrastructure related changes; ' +
'🧑‍💻, Improve developer experience; ' +
'💸, Add sponsorships or money related infrastructure; ' +
'🧵, Add or update code related to multithreading or concurrency; ' +
'🦺, Add or update code related to validation.'
: ''
}`
: 'Do not preface the commit with anything. Conventional commit keywords:' +
'fix, feat, build, chore, ci, docs, style, refactor, perf, test.'
}
${
config?.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message."
}
${
config?.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: ''
}
Use the present tense. Lines must not be longer than 74 characters. Use ${language} for the commit message.`
}); });
export const INIT_DIFF_PROMPT: ChatCompletionRequestMessage = { export const INIT_DIFF_PROMPT: OpenAI.Chat.Completions.ChatCompletionMessageParam =
role: ChatCompletionRequestMessageRoleEnum.User, {
content: `diff --git a/src/server.ts b/src/server.ts role: 'user',
content: `diff --git a/src/server.ts b/src/server.ts
index ad4db42..f3b18a9 100644 index ad4db42..f3b18a9 100644
--- a/src/server.ts --- a/src/server.ts
+++ b/src/server.ts +++ b/src/server.ts
@@ -149,29 +159,35 @@ export const INIT_DIFF_PROMPT: ChatCompletionRequestMessage = {
+app.listen(process.env.PORT || PORT, () => { +app.listen(process.env.PORT || PORT, () => {
+ console.log(\`Server listening on port \${PORT}\`); + console.log(\`Server listening on port \${PORT}\`);
});` });`
};
const getContent = (translation: ConsistencyPrompt) => {
const fix = config.OCO_EMOJI
? `🐛 ${removeConventionalCommitWord(translation.commitFix)}`
: translation.commitFix;
const feat = config.OCO_EMOJI
? `${removeConventionalCommitWord(translation.commitFeat)}`
: translation.commitFeat;
const description = config.OCO_DESCRIPTION
? translation.commitDescription
: '';
return `${fix}\n${feat}\n${description}`;
}; };
const INIT_CONSISTENCY_PROMPT = ( const INIT_CONSISTENCY_PROMPT = (
translation: ConsistencyPrompt translation: ConsistencyPrompt
): ChatCompletionRequestMessage => ({ ): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: ChatCompletionRequestMessageRoleEnum.Assistant, role: 'assistant',
content: `${ content: getContent(translation)
config?.OCO_EMOJI
? `🐛 ${removeConventionalCommitWord(translation.commitFix)}`
: translation.commitFix
}
${
config?.OCO_EMOJI
? `${removeConventionalCommitWord(translation.commitFeat)}`
: translation.commitFeat
}
${config?.OCO_DESCRIPTION ? translation.commitDescription : ''}`
}); });
export const getMainCommitPrompt = async ( export const getMainCommitPrompt = async (
fullGitMojiSpec: boolean fullGitMojiSpec: boolean
): Promise<ChatCompletionRequestMessage[]> => { ): Promise<Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>> => {
switch (config?.OCO_PROMPT_MODULE) { switch (config.OCO_PROMPT_MODULE) {
case '@commitlint': case '@commitlint':
if (!(await utils.commitlintLLMConfigExists())) { if (!(await utils.commitlintLLMConfigExists())) {
note( note(
@@ -197,7 +213,6 @@ export const getMainCommitPrompt = async (
]; ];
default: default:
// conventional-commit
return [ return [
INIT_MAIN_PROMPT(translation.localLanguage, fullGitMojiSpec), INIT_MAIN_PROMPT(translation.localLanguage, fullGitMojiSpec),
INIT_DIFF_PROMPT, INIT_DIFF_PROMPT,

View File

@@ -1,37 +1,65 @@
import { getConfig, OCO_AI_PROVIDER_ENUM } from '../commands/config';
import { AnthropicEngine } from '../engine/anthropic';
import { AzureEngine } from '../engine/azure';
import { AiEngine } from '../engine/Engine'; import { AiEngine } from '../engine/Engine';
import { OpenAi } from '../engine/openAi'; import { FlowiseAi } from '../engine/flowise';
import { Gemini } from '../engine/gemini'; import { Gemini } from '../engine/gemini';
import { getConfig } from '../commands/config';
import { OllamaAi } from '../engine/ollama'; import { OllamaAi } from '../engine/ollama';
import { AnthropicAi } from '../engine/anthropic' import { OpenAiEngine } from '../engine/openAi';
import { TestAi } from '../engine/testAi'; import { TestAi, TestMockType } from '../engine/testAi';
import { Azure } from '../engine/azure';
import { FlowiseAi } from '../engine/flowise'
export function getEngine(): AiEngine { export function getEngine(): AiEngine {
const config = getConfig(); const config = getConfig();
const provider = config?.OCO_AI_PROVIDER; const provider = config.OCO_AI_PROVIDER;
if (provider?.startsWith('ollama')) { const DEFAULT_CONFIG = {
const ollamaAi = new OllamaAi(); model: config.OCO_MODEL!,
const model = provider.substring('ollama/'.length); maxTokensOutput: config.OCO_TOKENS_MAX_OUTPUT!,
if (model) { maxTokensInput: config.OCO_TOKENS_MAX_INPUT!,
ollamaAi.setModel(model); baseURL: config.OCO_OPENAI_BASE_PATH!
ollamaAi.setUrl(config?.OCO_OLLAMA_API_URL); };
}
return ollamaAi; switch (provider) {
} else if (provider == 'anthropic') { case OCO_AI_PROVIDER_ENUM.OLLAMA:
return new AnthropicAi(); return new OllamaAi({
} else if (provider == 'test') { ...DEFAULT_CONFIG,
return new TestAi(); apiKey: '',
} else if (provider == 'gemini') { baseURL: config.OCO_OLLAMA_API_URL!
return new Gemini(); });
} else if (provider == 'azure') {
return new Azure(); case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
} else if( provider == 'flowise'){ return new AnthropicEngine({
return new FlowiseAi(); ...DEFAULT_CONFIG,
apiKey: config.OCO_ANTHROPIC_API_KEY!
});
case OCO_AI_PROVIDER_ENUM.TEST:
return new TestAi(config.OCO_TEST_MOCK_TYPE as TestMockType);
case OCO_AI_PROVIDER_ENUM.GEMINI:
return new Gemini({
...DEFAULT_CONFIG,
apiKey: config.OCO_GEMINI_API_KEY!,
baseURL: config.OCO_GEMINI_BASE_PATH!
});
case OCO_AI_PROVIDER_ENUM.AZURE:
return new AzureEngine({
...DEFAULT_CONFIG,
apiKey: config.OCO_AZURE_API_KEY!
});
case OCO_AI_PROVIDER_ENUM.FLOWISE:
return new FlowiseAi({
...DEFAULT_CONFIG,
baseURL: config.OCO_FLOWISE_ENDPOINT || DEFAULT_CONFIG.baseURL,
apiKey: config.OCO_FLOWISE_API_KEY!
});
default:
return new OpenAiEngine({
...DEFAULT_CONFIG,
apiKey: config.OCO_OPENAI_API_KEY!
});
} }
//open ai gpt by default
return new OpenAi();
} }

View File

@@ -3,6 +3,9 @@ import { prepareFile } from './utils';
describe('getConfig', () => { describe('getConfig', () => {
const originalEnv = { ...process.env }; const originalEnv = { ...process.env };
let globalConfigFile: { filePath: string; cleanup: () => Promise<void> };
let localEnvFile: { filePath: string; cleanup: () => Promise<void> };
function resetEnv(env: NodeJS.ProcessEnv) { function resetEnv(env: NodeJS.ProcessEnv) {
Object.keys(process.env).forEach((key) => { Object.keys(process.env).forEach((key) => {
if (!(key in env)) { if (!(key in env)) {
@@ -13,93 +16,135 @@ describe('getConfig', () => {
}); });
} }
beforeEach(() => { beforeEach(async () => {
resetEnv(originalEnv); resetEnv(originalEnv);
if (globalConfigFile) await globalConfigFile.cleanup();
if (localEnvFile) await localEnvFile.cleanup();
}); });
afterAll(() => { afterAll(() => {
resetEnv(originalEnv); resetEnv(originalEnv);
}); });
it('return config values from the global config file', async () => { const generateConfig = async (
const configFile = await prepareFile( fileName: string,
'.opencommit', content: Record<string, string>
` ) => {
OCO_OPENAI_API_KEY="sk-key" const fileContent = Object.entries(content)
OCO_ANTHROPIC_API_KEY="secret-key" .map(([key, value]) => `${key}="${value}"`)
OCO_TOKENS_MAX_INPUT="8192" .join('\n');
OCO_TOKENS_MAX_OUTPUT="1000" return await prepareFile(fileName, fileContent);
OCO_OPENAI_BASE_PATH="/openai/api" };
OCO_DESCRIPTION="true"
OCO_EMOJI="true" it('should prioritize local .env over global .opencommit config', async () => {
OCO_MODEL="gpt-4" globalConfigFile = await generateConfig('.opencommit', {
OCO_LANGUAGE="de" OCO_OPENAI_API_KEY: 'global-key',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER="$m" OCO_MODEL: 'gpt-3.5-turbo',
OCO_PROMPT_MODULE="@commitlint" OCO_LANGUAGE: 'en'
OCO_AI_PROVIDER="ollama" });
OCO_GITPUSH="false"
OCO_ONE_LINE_COMMIT="true" localEnvFile = await generateConfig('.env', {
` OCO_OPENAI_API_KEY: 'local-key',
); OCO_ANTHROPIC_API_KEY: 'local-anthropic-key',
const config = getConfig({ configPath: configFile.filePath, envPath: '' }); OCO_LANGUAGE: 'fr'
});
const config = getConfig({
configPath: globalConfigFile.filePath,
envPath: localEnvFile.filePath
});
expect(config).not.toEqual(null); expect(config).not.toEqual(null);
expect(config!['OCO_OPENAI_API_KEY']).toEqual('sk-key'); expect(config.OCO_OPENAI_API_KEY).toEqual('local-key');
expect(config!['OCO_ANTHROPIC_API_KEY']).toEqual('secret-key'); expect(config.OCO_MODEL).toEqual('gpt-3.5-turbo');
expect(config!['OCO_TOKENS_MAX_INPUT']).toEqual(8192); expect(config.OCO_LANGUAGE).toEqual('fr');
expect(config!['OCO_TOKENS_MAX_OUTPUT']).toEqual(1000); expect(config.OCO_ANTHROPIC_API_KEY).toEqual('local-anthropic-key');
expect(config!['OCO_OPENAI_BASE_PATH']).toEqual('/openai/api');
expect(config!['OCO_DESCRIPTION']).toEqual(true);
expect(config!['OCO_EMOJI']).toEqual(true);
expect(config!['OCO_MODEL']).toEqual('gpt-4');
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(() => ['ollama', 'gemini'].includes(config!['OCO_AI_PROVIDER'])).toBeTruthy();
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);
await configFile.cleanup();
}); });
it('return config values from the local env file', async () => { it('should fallback to global config when local config is not set', async () => {
const envFile = await prepareFile( globalConfigFile = await generateConfig('.opencommit', {
'.env', OCO_OPENAI_API_KEY: 'global-key',
` OCO_MODEL: 'gpt-4',
OCO_OPENAI_API_KEY="sk-key" OCO_LANGUAGE: 'de',
OCO_ANTHROPIC_API_KEY="secret-key" OCO_DESCRIPTION: 'true'
OCO_TOKENS_MAX_INPUT="8192" });
OCO_TOKENS_MAX_OUTPUT="1000"
OCO_OPENAI_BASE_PATH="/openai/api" localEnvFile = await generateConfig('.env', {
OCO_DESCRIPTION="true" OCO_ANTHROPIC_API_KEY: 'local-anthropic-key'
OCO_EMOJI="true" });
OCO_MODEL="gpt-4"
OCO_LANGUAGE="de" const config = getConfig({
OCO_MESSAGE_TEMPLATE_PLACEHOLDER="$m" configPath: globalConfigFile.filePath,
OCO_PROMPT_MODULE="@commitlint" envPath: localEnvFile.filePath
OCO_AI_PROVIDER="ollama" });
OCO_GITPUSH="false"
OCO_ONE_LINE_COMMIT="true"
`
);
const config = getConfig({ configPath: '', envPath: envFile.filePath });
expect(config).not.toEqual(null); expect(config).not.toEqual(null);
expect(config!['OCO_OPENAI_API_KEY']).toEqual('sk-key'); expect(config.OCO_OPENAI_API_KEY).toEqual('global-key');
expect(config!['OCO_ANTHROPIC_API_KEY']).toEqual('secret-key'); expect(config.OCO_ANTHROPIC_API_KEY).toEqual('local-anthropic-key');
expect(config!['OCO_TOKENS_MAX_INPUT']).toEqual(8192); expect(config.OCO_MODEL).toEqual('gpt-4');
expect(config!['OCO_TOKENS_MAX_OUTPUT']).toEqual(1000); expect(config.OCO_LANGUAGE).toEqual('de');
expect(config!['OCO_OPENAI_BASE_PATH']).toEqual('/openai/api'); expect(config.OCO_DESCRIPTION).toEqual(true);
expect(config!['OCO_DESCRIPTION']).toEqual(true); });
expect(config!['OCO_EMOJI']).toEqual(true);
expect(config!['OCO_MODEL']).toEqual('gpt-4');
expect(config!['OCO_LANGUAGE']).toEqual('de');
expect(config!['OCO_MESSAGE_TEMPLATE_PLACEHOLDER']).toEqual('$m');
expect(config!['OCO_PROMPT_MODULE']).toEqual('@commitlint');
expect(() => ['ollama', 'gemini'].includes(config!['OCO_AI_PROVIDER'])).toBeTruthy();
expect(config!['OCO_GITPUSH']).toEqual(false);
expect(config!['OCO_ONE_LINE_COMMIT']).toEqual(true);
await envFile.cleanup(); it('should handle boolean and numeric values correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_TOKENS_MAX_INPUT: '4096',
OCO_TOKENS_MAX_OUTPUT: '500',
OCO_GITPUSH: 'true'
});
localEnvFile = await generateConfig('.env', {
OCO_TOKENS_MAX_INPUT: '8192',
OCO_ONE_LINE_COMMIT: 'false'
});
const config = getConfig({
configPath: globalConfigFile.filePath,
envPath: localEnvFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_TOKENS_MAX_INPUT).toEqual(8192);
expect(config.OCO_TOKENS_MAX_OUTPUT).toEqual(500);
expect(config.OCO_GITPUSH).toEqual(true);
expect(config.OCO_ONE_LINE_COMMIT).toEqual(false);
});
it('should handle empty local config correctly', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_OPENAI_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'es'
});
localEnvFile = await generateConfig('.env', {});
const config = getConfig({
configPath: globalConfigFile.filePath,
envPath: localEnvFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_OPENAI_API_KEY).toEqual('global-key');
expect(config.OCO_MODEL).toEqual('gpt-4');
expect(config.OCO_LANGUAGE).toEqual('es');
});
it('should override global config with null values in local .env', async () => {
globalConfigFile = await generateConfig('.opencommit', {
OCO_OPENAI_API_KEY: 'global-key',
OCO_MODEL: 'gpt-4',
OCO_LANGUAGE: 'es'
});
localEnvFile = await generateConfig('.env', { OCO_OPENAI_API_KEY: 'null' });
const config = getConfig({
configPath: globalConfigFile.filePath,
envPath: localEnvFile.filePath
});
expect(config).not.toEqual(null);
expect(config.OCO_OPENAI_API_KEY).toEqual(null);
}); });
}); });

View File

@@ -1,7 +1,12 @@
import { Gemini } from '../../src/engine/gemini'; import { Gemini } from '../../src/engine/gemini';
import { ChatCompletionRequestMessage } from 'openai';
import { GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai'; import { GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai';
import { ConfigType, getConfig } from '../../src/commands/config'; import {
ConfigType,
getConfig,
OCO_AI_PROVIDER_ENUM
} from '../../src/commands/config';
import { OpenAI } from 'openai';
describe('Gemini', () => { describe('Gemini', () => {
let gemini: Gemini; let gemini: Gemini;
@@ -9,13 +14,17 @@ describe('Gemini', () => {
let mockGoogleGenerativeAi: GoogleGenerativeAI; let mockGoogleGenerativeAi: GoogleGenerativeAI;
let mockGenerativeModel: GenerativeModel; let mockGenerativeModel: GenerativeModel;
let mockExit: jest.SpyInstance<never, [code?: number | undefined], any>; let mockExit: jest.SpyInstance<never, [code?: number | undefined], any>;
let mockWarmup: jest.SpyInstance<any, unknown[], any>;
const noop: (code?: number | undefined) => never = (code?: number | undefined) => {}; const noop: (...args: any[]) => any = (...args: any[]) => {};
const mockGemini = () => { const mockGemini = () => {
gemini = new Gemini(); mockConfig = getConfig() as ConfigType;
}
gemini = new Gemini({
apiKey: mockConfig.OCO_GEMINI_API_KEY,
model: mockConfig.OCO_MODEL
});
};
const oldEnv = process.env; const oldEnv = process.env;
@@ -28,53 +37,35 @@ describe('Gemini', () => {
jest.mock('@clack/prompts', () => ({ jest.mock('@clack/prompts', () => ({
intro: jest.fn(), intro: jest.fn(),
outro: jest.fn(), outro: jest.fn()
})); }));
if (mockWarmup) mockWarmup.mockRestore();
mockExit = jest.spyOn(process, 'exit').mockImplementation(); mockExit = jest.spyOn(process, 'exit').mockImplementation();
mockConfig = getConfig() as ConfigType; mockConfig = getConfig() as ConfigType;
mockConfig.OCO_AI_PROVIDER = 'gemini'; mockConfig.OCO_AI_PROVIDER = OCO_AI_PROVIDER_ENUM.GEMINI;
mockConfig.OCO_GEMINI_API_KEY = 'mock-api-key'; mockConfig.OCO_GEMINI_API_KEY = 'mock-api-key';
mockConfig.OCO_MODEL = 'gemini-1.5-flash'; mockConfig.OCO_MODEL = 'gemini-1.5-flash';
mockGoogleGenerativeAi = new GoogleGenerativeAI(mockConfig.OCO_GEMINI_API_KEY); mockGoogleGenerativeAi = new GoogleGenerativeAI(
mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({ model: mockConfig.OCO_MODEL, }); mockConfig.OCO_GEMINI_API_KEY
);
mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({
model: mockConfig.OCO_MODEL
});
}); });
afterEach(() => { afterEach(() => {
gemini = undefined as any; gemini = undefined as any;
}) });
afterAll(() => { afterAll(() => {
mockExit.mockRestore(); mockExit.mockRestore();
process.env = oldEnv; process.env = oldEnv;
}); });
it('should initialize with correct config', () => { it.skip('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
mockGemini();
// gemini = new Gemini();
expect(gemini).toBeDefined();
});
it('should warmup correctly', () => {
mockWarmup = jest.spyOn(Gemini.prototype as any, 'warmup').mockImplementation(noop);
mockGemini();
expect(gemini).toBeDefined();
});
it('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
process.env.OCO_GEMINI_API_KEY = undefined;
process.env.OCO_AI_PROVIDER = 'gemini';
mockGemini();
expect(mockExit).toHaveBeenCalledWith(1);
});
it('should exit process if model is not supported and command is not config', () => {
process.env.OCO_GEMINI_API_KEY = undefined; process.env.OCO_GEMINI_API_KEY = undefined;
process.env.OCO_AI_PROVIDER = 'gemini'; process.env.OCO_AI_PROVIDER = 'gemini';
@@ -84,22 +75,24 @@ describe('Gemini', () => {
}); });
it('should generate commit message', async () => { it('should generate commit message', async () => {
const mockGenerateContent = jest.fn().mockResolvedValue({ response: { text: () => 'generated content' } }); const mockGenerateContent = jest
.fn()
.mockResolvedValue({ response: { text: () => 'generated content' } });
mockGenerativeModel.generateContent = mockGenerateContent; mockGenerativeModel.generateContent = mockGenerateContent;
mockWarmup = jest.spyOn(Gemini.prototype as any, 'warmup').mockImplementation(noop);
mockGemini(); mockGemini();
const messages: ChatCompletionRequestMessage[] = [ const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> =
{ role: 'system', content: 'system message' }, [
{ role: 'assistant', content: 'assistant message' }, { role: 'system', content: 'system message' },
]; { role: 'assistant', content: 'assistant message' }
];
jest.spyOn(gemini, 'generateCommitMessage').mockImplementation(async () => 'generated content'); jest
.spyOn(gemini, 'generateCommitMessage')
.mockImplementation(async () => 'generated content');
const result = await gemini.generateCommitMessage(messages); const result = await gemini.generateCommitMessage(messages);
expect(result).toEqual('generated content'); expect(result).toEqual('generated content');
expect(mockWarmup).toHaveBeenCalled();
}); });
}); });

View File

@@ -22,6 +22,7 @@ export async function prepareFile(
const cleanup = async () => { const cleanup = async () => {
return fsRemove(tempDir, { recursive: true }); return fsRemove(tempDir, { recursive: true });
}; };
return { return {
filePath, filePath,
cleanup cleanup