Compare commits

...

36 Commits

Author SHA1 Message Date
di-sukharev
6d9fff56aa 3.2.12 2026-01-17 23:46:15 +03:00
di-sukharev
6ed70d0382 add oco models command 2026-01-17 23:46:04 +03:00
di-sukharev
5b241ed2d0 refactor: enhance error handling and normalization across AI engines
This update introduces a centralized error handling mechanism for various AI engines, improving the consistency and clarity of error messages. The new `normalizeEngineError` function standardizes error responses, allowing for better user feedback and recovery suggestions. Additionally, specific error classes for insufficient credits, rate limits, and service availability have been implemented, along with user-friendly formatting for error messages. This refactor aims to enhance the overall user experience when interacting with the AI services.
2026-01-17 23:34:49 +03:00
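The engine diffs further down call `normalizeEngineError(error, provider, model)` from `src/utils/engineErrorHandler.ts`, a file that is not included in this compare. A minimal sketch of what such a normalizer might look like, assuming Axios-style provider errors and the error classes named in the commit body (insufficient credits, rate limits, service availability):

```typescript
// Hedged sketch only — not the actual engineErrorHandler.ts from this compare.
// The signature matches the call sites in the engine diffs below.
import axios from 'axios';

export class InsufficientCreditsError extends Error {}
export class RateLimitError extends Error {}
export class ServiceUnavailableError extends Error {}

export function normalizeEngineError(
  error: unknown,
  provider: string,
  model?: string
): Error {
  const prefix = model ? `${provider} (${model})` : provider;

  if (axios.isAxiosError<{ error?: { message?: string } }>(error)) {
    const status = error.response?.status;
    const apiMessage = error.response?.data?.error?.message ?? error.message;

    if (status === 402) return new InsufficientCreditsError(`${prefix}: ${apiMessage}`);
    if (status === 429) return new RateLimitError(`${prefix}: ${apiMessage}`);
    if (status === 503) return new ServiceUnavailableError(`${prefix}: ${apiMessage}`);

    return new Error(`${prefix}: ${apiMessage}`);
  }

  return error instanceof Error ? error : new Error(`${prefix}: ${String(error)}`);
}
```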
di-sukharev
8b0ee25923 build 2026-01-17 23:06:18 +03:00
di-sukharev
fdd4d89bba 3.2.11 2026-01-17 23:06:17 +03:00
di-sukharev
d70797b864 feat: add interactive setup wizard and model error handling
Add comprehensive setup command with provider selection, API key
configuration, and model selection. Include error recovery for
model-not-found scenarios with suggested alternatives and automatic
retry functionality. Update Anthropic model list with latest versions
and add provider metadata for better user experience.
2026-01-17 23:04:43 +03:00
GPT8
ebbaff0628 Merge pull request #514 from Abir-Tx/master
Addition of a troubleshooting section for Ollama IPv6/IPv4 connection issues to the documentation
2025-08-25 12:07:52 +03:00
Mushfiqur Rahman Abir
4f164a31d1 Merge pull request #1 from Abir-Tx/imgbot
[ImgBot] Optimize images
2025-08-16 20:05:50 +06:00
Mushfiqur Rahman Abir
a70a2b8a9f Add troubleshooting section for Ollama IPv6/IPv4 connection issues in the doc
closes #310
The fix is documented in the README.md file.

Signed-off-by: Mushfiqur Rahman Abir <28858998+Abir-Tx@users.noreply.github.com>
2025-08-16 18:44:31 +06:00
ImgBotApp
52bb719f4e [ImgBot] Optimize images
*Total -- 315.69kb -> 245.97kb (22.09%)

/.github/github-mark-white.png -- 4.72kb -> 2.83kb (39.98%)
/.github/opencommit-example.png -- 303.97kb -> 236.62kb (22.16%)
/.github/logo-black.png -- 2.11kb -> 1.65kb (21.8%)
/.github/logo-grad.svg -- 3.19kb -> 3.17kb (0.52%)
/.github/logo.svg -- 1.70kb -> 1.69kb (0.34%)

Signed-off-by: ImgBotApp <ImgBotHelp@gmail.com>
2025-08-16 12:33:29 +00:00
di-sukharev
c904a78cd9 build 2025-08-01 16:13:54 +03:00
GPT8
22077399fd Merge pull request #506 from di-sukharev/dev
3.2.10
2025-08-01 16:13:34 +03:00
di-sukharev
8ae2f7ddf1 3.2.10 2025-08-01 16:05:20 +03:00
di-sukharev
b318d1d882 Merge branch 'master' into dev 2025-08-01 16:02:44 +03:00
GPT8
af0f2c1df4 Merge pull request #505 from D1m7asis/dev-aimlapi
feat: add AIML API provider support
2025-08-01 16:00:16 +03:00
D1m7asis
c5ce50aaa3 feat: add AIML API provider support
Introduces AIMLAPI as a supported AI provider, including model list, config validation, and engine implementation. Updates README and engine selection logic to integrate AIMLAPI for chat completions.

Refactor AimlApiEngine response handling

Removed dependency on removeContentTags and simplified message content extraction. Minor header formatting fix for HTTP-Referer. This streamlines the response handling and reduces unnecessary processing.
2025-08-01 14:48:11 +02:00
GPT8
c1756b85af Merge pull request #498 from kykungz/fix-491
Fix TypeScript build error and add missing confirm import (regression from #491)
2025-07-23 17:12:44 +03:00
GPT8
dac1271782 Merge pull request #496 from kykungz/resolve-top-level-git-dir
Fix git commands when executed from subdirectories
2025-07-23 17:10:37 +03:00
Kongpon Charanwattanakit
1cc7a64f99 feat(commit.ts): add confirmation prompt and refactor commit message editing for better user experience 2025-07-23 16:15:20 +07:00
GPT8
4deb7bca65 Merge pull request #488 from anpigon/fix/i18n-ko
fix(i18n): correct typo in Korean translation for 'feat' commit type
2025-07-22 23:40:54 +03:00
GPT8
1a90485a10 Merge pull request #491 from leoliu0605/dev
feat(commit.ts): enable users to edit commit message before committing
2025-07-22 23:38:30 +03:00
GPT8
48b8d9d7b2 Merge pull request #494 from PhantasWeng/commit-hook-default
feat(config): add OCO_HOOK_AUTO_UNCOMMENT config key and update commit message hook behavior to conditionally uncomment the message
2025-07-22 23:37:05 +03:00
Kongpon Charanwattanakit
7e60c68ba5 refactor(git): add getGitDir helper and update functions to use cwd option for better git repository handling 2025-07-14 21:50:58 +07:00
Phantas Weng
24adc16adf fix(run.ts): remove trailing comma from OCO_AI_PROVIDER_ENUM array to fix the prettier test 2025-07-08 09:27:40 +00:00
Phantas Weng
881f07eebe fix(prepare-commit-msg-hook): simplify commit message generation logic for clarity and maintainability 2025-07-08 05:38:42 +00:00
Phantas Weng
3a255a3ad9 feat(config): add OCO_HOOK_AUTO_UNCOMMENT config key and update commit message hook behavior to conditionally uncomment the message 2025-07-08 05:25:32 +00:00
GPT8
9971b3c74e Merge pull request #492 from PhantasWeng/git-hook-message
feat(prepare-commit-msg-hook): enhance commit message formatting with a divider and instructions for better user guidance
2025-07-04 11:42:30 +03:00
Phantas Weng
66a5695d89 feat(prepare-commit-msg-hook): enhance commit message formatting with a divider and instructions for better user guidance 2025-07-01 06:02:32 +00:00
GPT8
fd22f713ed Merge pull request #489 from yshngg/patch-1
fix(migrations): skip unhandled AI providers during migration execution
2025-06-29 12:19:17 +03:00
leoliu
43dc5e6c2b feat(commit.ts): enable users to edit commit message before committing 2025-06-26 23:41:58 +08:00
Yusheng Guo
3d42dde48c fix(migrations): skip unhandled AI providers during migration execution
The changes:
1. Expanded the skip condition to include additional AI providers (DEEPSEEK, GROQ, MISTRAL, MLX, OPENROUTER) beyond just TEST
2. Maintained existing TEST provider skip behavior
3. Added explicit comment explaining the skip logic

The why:
Prevents migration execution for unsupported AI providers to avoid potential runtime errors or data inconsistencies, ensuring migrations only run for properly handled configurations.
2025-06-23 15:34:22 +08:00
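The migration runner itself (`src/migrations/_run.ts`) is not part of this compare; a hypothetical sketch of the expanded guard this commit describes, using the provider names from `OCO_AI_PROVIDER_ENUM` in `config.ts`:

```typescript
// Hypothetical sketch — not the actual _run.ts code from this compare.
import { OCO_AI_PROVIDER_ENUM, getConfig } from '../commands/config';

// Providers the migration scripts do not handle; TEST was already skipped before
// this commit, the rest are the additions it describes.
const UNHANDLED_PROVIDERS: OCO_AI_PROVIDER_ENUM[] = [
  OCO_AI_PROVIDER_ENUM.TEST,
  OCO_AI_PROVIDER_ENUM.DEEPSEEK,
  OCO_AI_PROVIDER_ENUM.GROQ,
  OCO_AI_PROVIDER_ENUM.MISTRAL,
  OCO_AI_PROVIDER_ENUM.MLX,
  OCO_AI_PROVIDER_ENUM.OPENROUTER
];

// A guard like this would sit at the top of runMigrations(): skip migrations
// entirely for unhandled providers to avoid runtime errors or inconsistent
// config rewrites.
export const shouldSkipMigrations = (): boolean => {
  const provider = getConfig().OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM;
  return UNHANDLED_PROVIDERS.includes(provider);
};
```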
anpigon
19f32ca57d fix(i18n): correct typo in Korean translation for 'feat' commit type #487 2025-06-21 18:12:55 +09:00
GPT8
c1070789fd Merge pull request #485 from frauniki/add-prettier-ci
chore: Add Prettier format check to CI and format code
2025-06-15 12:18:31 +03:00
di-sukharev
1f0f44ede0 build 2025-06-15 12:17:18 +03:00
frauniki
45aed936b1 ♻️ refactor: clean up code formatting and improve readability
- Fix inconsistent indentation across multiple engine files
- Remove trailing whitespace and add missing newlines
- Improve code formatting in prompt generation functions
- Break long lines for better readability
- Standardize spacing and brackets placement
2025-06-15 17:29:12 +09:00
frauniki
e4f7e8dc80 add prettier formatting check to CI workflow and npm scripts
- Add prettier job to GitHub Actions workflow to enforce code formatting
- Add format:check script to package.json for checking formatting
- Include failure message when prettier check fails in CI
2025-06-15 17:28:40 +09:00
42 changed files with 10669 additions and 5138 deletions

Binary file not shown. Size: 4.7 KiB -> 2.8 KiB.

BIN
.github/logo-black.png vendored

Binary file not shown. Size: 2.1 KiB -> 1.7 KiB.

14
.github/logo-grad.svg vendored

@@ -1,13 +1 @@
<svg width="78" height="75" viewBox="0 0 78 75" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M32.269 2.94345C34.6328 4.17458 36.5623 5.81371 38.0626 7.86409C37.7038 8.37105 37.3661 8.90001 37.0496 9.45094L37.0495 9.45091L37.0456 9.45797C35.2629 12.6805 34.3831 16.5345 34.3831 21V54C34.3831 58.4007 35.2636 62.2523 37.0435 65.5381L37.0433 65.5382L37.0496 65.5491C37.3661 66.1 37.7038 66.629 38.0626 67.1359C36.5622 69.1863 34.6328 70.8254 32.269 72.0565L32.2652 72.0586C29.2195 73.6786 25.5374 74.5 21.2 74.5C16.8638 74.5 13.1471 73.6791 10.0328 72.0575C6.98854 70.4377 4.62693 68.1096 2.94057 65.0635C1.31973 61.949 0.5 58.2664 0.5 54V21C0.5 16.6643 1.32072 12.9834 2.93951 9.93843C4.62596 6.89138 6.98794 4.56255 10.0329 2.94245C13.1472 1.32089 16.8639 0.5 21.2 0.5C25.5374 0.5 29.2195 1.32137 32.2652 2.94145L32.269 2.94345ZM38.6667 8.74806C38.9107 9.13077 39.1413 9.52635 39.3586 9.93481L39.3585 9.93484L39.3625 9.94203C41.047 12.9872 41.9 16.6336 41.9 20.9V54C41.9 58.266 41.0472 61.9477 39.3603 65.0619L39.3586 65.0652C39.1413 65.4736 38.9107 65.8692 38.6667 66.2519C38.4054 65.8665 38.1565 65.468 37.9199 65.0565C36.235 61.9435 35.3831 58.2635 35.3831 54V21C35.3831 16.6672 36.236 12.989 37.9187 9.94557C38.1556 9.53328 38.405 9.13412 38.6667 8.74806ZM39.2936 7.87926C40.8728 5.82164 42.8446 4.17787 45.2123 2.94436C48.3955 1.32076 52.1474 0.5 56.4831 0.5C60.8172 0.5 64.5319 1.3534 67.645 3.03964L67.6449 3.0397L67.6522 3.04345C70.7657 4.6651 73.1602 6.99537 74.8456 10.042C76.464 12.9676 77.3148 16.448 77.3792 20.5H69.3778C69.2917 16.5201 68.1674 13.3804 65.942 11.1517C63.6909 8.76341 60.5126 7.6 56.4831 7.6C52.4533 7.6 49.2164 8.72969 46.8349 11.0412L46.8348 11.0412L46.8296 11.0464C44.5081 13.3679 43.3831 16.6791 43.3831 20.9V54C43.3831 58.2218 44.5085 61.5622 46.8243 63.9482L46.8295 63.9536L46.8349 63.9588C49.2164 66.2703 52.4533 67.4 56.4831 67.4C60.5114 67.4 63.6898 66.2708 65.9421 63.9481C68.1656 61.657 69.2916 58.4862 69.3778 54.5H77.379C77.3138 58.4875 76.4638 61.9697 74.8444 64.9601C73.1588 68.0063 70.7636 70.3703 67.6486 72.0584C64.5346 73.6794 60.8185 74.5 56.4831 74.5C52.1474 74.5 48.3956 73.6793 45.2125 72.0557C42.8446 70.8222 40.8729 69.1784 39.2936 67.1207C39.6322 66.6146 39.9479 66.0865 40.2405 65.5365C42.0198 62.251 42.9 58.4 42.9 54V20.9C42.9 16.5014 42.0203 12.6824 40.2396 9.46166C39.9472 8.91234 39.6319 8.38486 39.2936 7.87926ZM11.8359 63.9427L11.8359 63.9427L11.841 63.9481C14.0918 66.2691 17.2355 67.4 21.2 67.4C25.2274 67.4 28.3768 66.2711 30.5644 63.9423C32.8103 61.5559 33.9 58.2177 33.9 54V21C33.9 16.7865 32.8123 13.4792 30.5643 11.1575C28.378 8.76316 25.2286 7.6 21.2 7.6C17.2326 7.6 14.088 8.76605 11.8384 11.1546C9.58856 13.4765 8.5 16.7848 8.5 21V54C8.5 58.2179 9.58979 61.5562 11.8359 63.9427Z" fill="url(#paint0_linear_498_146)" stroke="url(#paint1_linear_498_146)"/>
<defs>
<linearGradient id="paint0_linear_498_146" x1="38.9416" y1="0" x2="38.9416" y2="75" gradientUnits="userSpaceOnUse">
<stop stop-color="#D33075"/>
<stop offset="1" stop-color="#6157D8"/>
</linearGradient>
<linearGradient id="paint1_linear_498_146" x1="38.9416" y1="0" x2="38.9416" y2="75" gradientUnits="userSpaceOnUse">
<stop stop-color="#D33075"/>
<stop offset="1" stop-color="#6157D8"/>
</linearGradient>
</defs>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="78" height="75" fill="none" viewBox="0 0 78 75"><path fill="url(#paint0_linear_498_146)" stroke="url(#paint1_linear_498_146)" d="M32.269 2.94345C34.6328 4.17458 36.5623 5.81371 38.0626 7.86409C37.7038 8.37105 37.3661 8.90001 37.0496 9.45094L37.0495 9.45091L37.0456 9.45797C35.2629 12.6805 34.3831 16.5345 34.3831 21V54C34.3831 58.4007 35.2636 62.2523 37.0435 65.5381L37.0433 65.5382L37.0496 65.5491C37.3661 66.1 37.7038 66.629 38.0626 67.1359C36.5622 69.1863 34.6328 70.8254 32.269 72.0565L32.2652 72.0586C29.2195 73.6786 25.5374 74.5 21.2 74.5C16.8638 74.5 13.1471 73.6791 10.0328 72.0575C6.98854 70.4377 4.62693 68.1096 2.94057 65.0635C1.31973 61.949 0.5 58.2664 0.5 54V21C0.5 16.6643 1.32072 12.9834 2.93951 9.93843C4.62596 6.89138 6.98794 4.56255 10.0329 2.94245C13.1472 1.32089 16.8639 0.5 21.2 0.5C25.5374 0.5 29.2195 1.32137 32.2652 2.94145L32.269 2.94345ZM38.6667 8.74806C38.9107 9.13077 39.1413 9.52635 39.3586 9.93481L39.3585 9.93484L39.3625 9.94203C41.047 12.9872 41.9 16.6336 41.9 20.9V54C41.9 58.266 41.0472 61.9477 39.3603 65.0619L39.3586 65.0652C39.1413 65.4736 38.9107 65.8692 38.6667 66.2519C38.4054 65.8665 38.1565 65.468 37.9199 65.0565C36.235 61.9435 35.3831 58.2635 35.3831 54V21C35.3831 16.6672 36.236 12.989 37.9187 9.94557C38.1556 9.53328 38.405 9.13412 38.6667 8.74806ZM39.2936 7.87926C40.8728 5.82164 42.8446 4.17787 45.2123 2.94436C48.3955 1.32076 52.1474 0.5 56.4831 0.5C60.8172 0.5 64.5319 1.3534 67.645 3.03964L67.6449 3.0397L67.6522 3.04345C70.7657 4.6651 73.1602 6.99537 74.8456 10.042C76.464 12.9676 77.3148 16.448 77.3792 20.5H69.3778C69.2917 16.5201 68.1674 13.3804 65.942 11.1517C63.6909 8.76341 60.5126 7.6 56.4831 7.6C52.4533 7.6 49.2164 8.72969 46.8349 11.0412L46.8348 11.0412L46.8296 11.0464C44.5081 13.3679 43.3831 16.6791 43.3831 20.9V54C43.3831 58.2218 44.5085 61.5622 46.8243 63.9482L46.8295 63.9536L46.8349 63.9588C49.2164 66.2703 52.4533 67.4 56.4831 67.4C60.5114 67.4 63.6898 66.2708 65.9421 63.9481C68.1656 61.657 69.2916 58.4862 69.3778 54.5H77.379C77.3138 58.4875 76.4638 61.9697 74.8444 64.9601C73.1588 68.0063 70.7636 70.3703 67.6486 72.0584C64.5346 73.6794 60.8185 74.5 56.4831 74.5C52.1474 74.5 48.3956 73.6793 45.2125 72.0557C42.8446 70.8222 40.8729 69.1784 39.2936 67.1207C39.6322 66.6146 39.9479 66.0865 40.2405 65.5365C42.0198 62.251 42.9 58.4 42.9 54V20.9C42.9 16.5014 42.0203 12.6824 40.2396 9.46166C39.9472 8.91234 39.6319 8.38486 39.2936 7.87926ZM11.8359 63.9427L11.8359 63.9427L11.841 63.9481C14.0918 66.2691 17.2355 67.4 21.2 67.4C25.2274 67.4 28.3768 66.2711 30.5644 63.9423C32.8103 61.5559 33.9 58.2177 33.9 54V21C33.9 16.7865 32.8123 13.4792 30.5643 11.1575C28.378 8.76316 25.2286 7.6 21.2 7.6C17.2326 7.6 14.088 8.76605 11.8384 11.1546C9.58856 13.4765 8.5 16.7848 8.5 21V54C8.5 58.2179 9.58979 61.5562 11.8359 63.9427Z"/><defs><linearGradient id="paint0_linear_498_146" x1="38.942" x2="38.942" y1="0" y2="75" gradientUnits="userSpaceOnUse"><stop stop-color="#D33075"/><stop offset="1" stop-color="#6157D8"/></linearGradient><linearGradient id="paint1_linear_498_146" x1="38.942" x2="38.942" y1="0" y2="75" gradientUnits="userSpaceOnUse"><stop stop-color="#D33075"/><stop offset="1" stop-color="#6157D8"/></linearGradient></defs></svg>

Size: 3.2 KiB -> 3.2 KiB.

5
.github/logo.svg vendored

@@ -1,4 +1 @@
<svg width="78" height="75" viewBox="0 0 78 75" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M21.2 75C16.8 75 13 74.1667 9.8 72.5C6.66667 70.8333 4.23333 68.4333 2.5 65.3C0.833333 62.1 0 58.3333 0 54V21C0 16.6 0.833333 12.8333 2.5 9.7C4.23333 6.56666 6.66667 4.16666 9.8 2.5C13 0.833333 16.8 0 21.2 0C25.6 0 29.3667 0.833333 32.5 2.5C35.7 4.16666 38.1333 6.56666 39.8 9.7C41.5333 12.8333 42.4 16.5667 42.4 20.9V54C42.4 58.3333 41.5333 62.1 39.8 65.3C38.1333 68.4333 35.7 70.8333 32.5 72.5C29.3667 74.1667 25.6 75 21.2 75ZM21.2 66.9C25.1333 66.9 28.1333 65.8 30.2 63.6C32.3333 61.3333 33.4 58.1333 33.4 54V21C33.4 16.8667 32.3333 13.7 30.2 11.5C28.1333 9.23333 25.1333 8.1 21.2 8.1C17.3333 8.1 14.3333 9.23333 12.2 11.5C10.0667 13.7 9 16.8667 9 21V54C9 58.1333 10.0667 61.3333 12.2 63.6C14.3333 65.8 17.3333 66.9 21.2 66.9Z" fill="black"/>
<path d="M56.4831 75C52.0831 75 48.2498 74.1667 44.9831 72.5C41.7831 70.8333 39.2831 68.4333 37.4831 65.3C35.7498 62.1 34.8831 58.3333 34.8831 54V21C34.8831 16.6 35.7498 12.8333 37.4831 9.7C39.2831 6.56666 41.7831 4.16666 44.9831 2.5C48.2498 0.833333 52.0831 0 56.4831 0C60.8831 0 64.6831 0.866665 67.8831 2.6C71.0831 4.26667 73.5498 6.66667 75.2831 9.8C77.0165 12.9333 77.8831 16.6667 77.8831 21H68.8831C68.8831 16.8667 67.7831 13.7 65.5831 11.5C63.4498 9.23333 60.4165 8.1 56.4831 8.1C52.5498 8.1 49.4498 9.2 47.1831 11.4C44.9831 13.6 43.8831 16.7667 43.8831 20.9V54C43.8831 58.1333 44.9831 61.3333 47.1831 63.6C49.4498 65.8 52.5498 66.9 56.4831 66.9C60.4165 66.9 63.4498 65.8 65.5831 63.6C67.7831 61.3333 68.8831 58.1333 68.8831 54H77.8831C77.8831 58.2667 77.0165 62 75.2831 65.2C73.5498 68.3333 71.0831 70.7667 67.8831 72.5C64.6831 74.1667 60.8831 75 56.4831 75Z" fill="black"/>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="78" height="75" fill="none" viewBox="0 0 78 75"><path fill="#000" d="M21.2 75C16.8 75 13 74.1667 9.8 72.5C6.66667 70.8333 4.23333 68.4333 2.5 65.3C0.833333 62.1 0 58.3333 0 54V21C0 16.6 0.833333 12.8333 2.5 9.7C4.23333 6.56666 6.66667 4.16666 9.8 2.5C13 0.833333 16.8 0 21.2 0C25.6 0 29.3667 0.833333 32.5 2.5C35.7 4.16666 38.1333 6.56666 39.8 9.7C41.5333 12.8333 42.4 16.5667 42.4 20.9V54C42.4 58.3333 41.5333 62.1 39.8 65.3C38.1333 68.4333 35.7 70.8333 32.5 72.5C29.3667 74.1667 25.6 75 21.2 75ZM21.2 66.9C25.1333 66.9 28.1333 65.8 30.2 63.6C32.3333 61.3333 33.4 58.1333 33.4 54V21C33.4 16.8667 32.3333 13.7 30.2 11.5C28.1333 9.23333 25.1333 8.1 21.2 8.1C17.3333 8.1 14.3333 9.23333 12.2 11.5C10.0667 13.7 9 16.8667 9 21V54C9 58.1333 10.0667 61.3333 12.2 63.6C14.3333 65.8 17.3333 66.9 21.2 66.9Z"/><path fill="#000" d="M56.4831 75C52.0831 75 48.2498 74.1667 44.9831 72.5C41.7831 70.8333 39.2831 68.4333 37.4831 65.3C35.7498 62.1 34.8831 58.3333 34.8831 54V21C34.8831 16.6 35.7498 12.8333 37.4831 9.7C39.2831 6.56666 41.7831 4.16666 44.9831 2.5C48.2498 0.833333 52.0831 0 56.4831 0C60.8831 0 64.6831 0.866665 67.8831 2.6C71.0831 4.26667 73.5498 6.66667 75.2831 9.8C77.0165 12.9333 77.8831 16.6667 77.8831 21H68.8831C68.8831 16.8667 67.7831 13.7 65.5831 11.5C63.4498 9.23333 60.4165 8.1 56.4831 8.1C52.5498 8.1 49.4498 9.2 47.1831 11.4C44.9831 13.6 43.8831 16.7667 43.8831 20.9V54C43.8831 58.1333 44.9831 61.3333 47.1831 63.6C49.4498 65.8 52.5498 66.9 56.4831 66.9C60.4165 66.9 63.4498 65.8 65.5831 63.6C67.7831 61.3333 68.8831 58.1333 68.8831 54H77.8831C77.8831 58.2667 77.0165 62 75.2831 65.2C73.5498 68.3333 71.0831 70.7667 67.8831 72.5C64.6831 74.1667 60.8831 75 56.4831 75Z"/></svg>

Size: 1.7 KiB -> 1.7 KiB.

Binary file not shown. Size: 304 KiB -> 237 KiB.

View File

@@ -51,3 +51,21 @@ jobs:
run: npm run build
- name: Run E2E Tests
run: npm run test:e2e
prettier:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '20.x'
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Run Prettier
run: npm run format:check
- name: Prettier Output
if: failure()
run: |
echo "Prettier check failed. Please run 'npm run format' to fix formatting issues."
exit 1

View File

@@ -74,6 +74,22 @@ oco config set OCO_API_URL='http://192.168.1.10:11434/api/chat'
where 192.168.1.10 is example of endpoint URL, where you have ollama set up.
#### Troubleshooting Ollama IPv6/IPv4 Connection Fix
If you encounter issues with Ollama, such as the error
```sh
✖ local model issues. details: connect ECONNREFUSED ::1:11434
```
It's likely because Ollama is not listening on IPv6 by default. To fix this, you can set the OLLAMA_HOST environment variable to 0.0.0.0 before starting Ollama:
```bash
export OLLAMA_HOST=0.0.0.0
```
This will make Ollama listen on all interfaces, including IPv6 and IPv4, resolving the connection issue. You can add this line to your shell configuration file (like `.bashrc` or `.zshrc`) to make it persistent across sessions.
### Flags
There are multiple optional flags that can be used with the `oco` command:
@@ -106,7 +122,7 @@ Create a `.env` file and add OpenCommit config variables there like this:
```env
...
OCO_AI_PROVIDER=<openai (default), anthropic, azure, ollama, gemini, flowise, deepseek>
OCO_AI_PROVIDER=<openai (default), anthropic, azure, ollama, gemini, flowise, deepseek, aimlapi>
OCO_API_KEY=<your OpenAI API token> // or other LLM provider API token
OCO_API_URL=<may be used to set proxy path to OpenAI api>
OCO_API_CUSTOM_HEADERS=<JSON string of custom HTTP headers to include in API requests>
@@ -185,6 +201,28 @@ or for as a cheaper option:
oco config set OCO_MODEL=gpt-3.5-turbo
```
### Model Management
OpenCommit automatically fetches available models from your provider when you run `oco setup`. Models are cached for 7 days to reduce API calls.
To see available models for your current provider:
```sh
oco models
```
To refresh the model list (e.g., after new models are released):
```sh
oco models --refresh
```
To see models for a specific provider:
```sh
oco models --provider anthropic
```
### Switch to other LLM providers with a custom URL
By default OpenCommit uses [OpenAI](https://openai.com).

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

4
package-lock.json generated

@@ -1,12 +1,12 @@
{
"name": "opencommit",
"version": "3.2.9",
"version": "3.2.12",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "opencommit",
"version": "3.2.9",
"version": "3.2.12",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.10.0",

View File

@@ -1,6 +1,6 @@
{
"name": "opencommit",
"version": "3.2.9",
"version": "3.2.12",
"description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
"keywords": [
"git",
@@ -51,6 +51,7 @@
"deploy:patch": "npm version patch && npm run deploy:build",
"lint": "eslint src --ext ts && tsc --noEmit",
"format": "prettier --write src",
"format:check": "prettier --check src",
"test": "node --no-warnings --experimental-vm-modules $( [ -f ./node_modules/.bin/jest ] && echo ./node_modules/.bin/jest || which jest ) test/unit",
"test:all": "npm run test:unit:docker && npm run test:e2e:docker",
"test:docker-build": "docker build -t oco-test -f test/Dockerfile .",

View File

@@ -8,6 +8,13 @@ import { commitlintConfigCommand } from './commands/commitlint';
import { configCommand } from './commands/config';
import { hookCommand, isHookCalled } from './commands/githook.js';
import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
import {
setupCommand,
isFirstRun,
runSetup,
promptForMissingApiKey
} from './commands/setup';
import { modelsCommand } from './commands/models';
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
import { runMigrations } from './migrations/_run.js';
@@ -17,7 +24,7 @@ cli(
{
version: packageJSON.version,
name: 'opencommit',
commands: [configCommand, hookCommand, commitlintConfigCommand],
commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
flags: {
fgm: {
type: Boolean,
@@ -47,6 +54,20 @@ cli(
if (await isHookCalled()) {
prepareCommitMessageHook();
} else {
// Check for first run and trigger setup wizard
if (isFirstRun()) {
const setupComplete = await runSetup();
if (!setupComplete) {
process.exit(1);
}
}
// Check for missing API key and prompt if needed
const hasApiKey = await promptForMissingApiKey();
if (!hasApiKey) {
process.exit(1);
}
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
}
},

View File

@@ -1,5 +1,7 @@
export enum COMMANDS {
config = 'config',
hook = 'hook',
commitlint = 'commitlint'
commitlint = 'commitlint',
setup = 'setup',
models = 'models'
}

View File

@@ -1,4 +1,5 @@
import {
text,
confirm,
intro,
isCancel,
@@ -10,6 +11,10 @@ import {
import chalk from 'chalk';
import { execa } from 'execa';
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
import {
formatUserFriendlyError,
printFormattedError
} from '../utils/errors';
import {
assertGitRepo,
getChangedFiles,
@@ -85,15 +90,29 @@ ${commitMessage}
${chalk.grey('——————————————————')}`
);
const isCommitConfirmedByUser =
skipCommitConfirmation ||
(await confirm({
message: 'Confirm the commit message?'
}));
const userAction = skipCommitConfirmation
? 'Yes'
: await select({
message: 'Confirm the commit message?',
options: [
{ value: 'Yes', label: 'Yes' },
{ value: 'No', label: 'No' },
{ value: 'Edit', label: 'Edit' }
]
});
if (isCancel(isCommitConfirmedByUser)) process.exit(1);
if (isCancel(userAction)) process.exit(1);
if (isCommitConfirmedByUser) {
if (userAction === 'Edit') {
const textResponse = await text({
message: 'Please edit the commit message: (press Enter to continue)',
initialValue: commitMessage
});
commitMessage = textResponse.toString();
}
if (userAction === 'Yes' || userAction === 'Edit') {
const committingChangesSpinner = spinner();
committingChangesSpinner.start('Committing the changes');
const { stdout } = await execa('git', [
@@ -196,10 +215,11 @@ ${chalk.grey('——————————————————')}`
`${chalk.red('✖')} Failed to generate the commit message`
);
console.log(error);
const errorConfig = getConfig();
const provider = errorConfig.OCO_AI_PROVIDER || 'openai';
const formatted = formatUserFriendlyError(error, provider);
outro(printFormattedError(formatted));
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
process.exit(1);
}
};

View File

@@ -27,7 +27,8 @@ export enum CONFIG_KEYS {
OCO_API_URL = 'OCO_API_URL',
OCO_API_CUSTOM_HEADERS = 'OCO_API_CUSTOM_HEADERS',
OCO_OMIT_SCOPE = 'OCO_OMIT_SCOPE',
OCO_GITPUSH = 'OCO_GITPUSH' // todo: deprecate
OCO_GITPUSH = 'OCO_GITPUSH', // todo: deprecate
OCO_HOOK_AUTO_UNCOMMENT = 'OCO_HOOK_AUTO_UNCOMMENT'
}
export enum CONFIG_MODES {
@@ -67,10 +68,11 @@ export const MODEL_LIST = {
],
anthropic: [
'claude-3-5-sonnet-20240620',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307'
'claude-sonnet-4-20250514',
'claude-opus-4-20250514',
'claude-3-7-sonnet-20250219',
'claude-3-5-sonnet-20241022',
'claude-3-5-haiku-20241022'
],
gemini: [
@@ -132,9 +134,113 @@ export const MODEL_LIST = {
'mistral-moderation-2411',
'mistral-moderation-latest'
],
deepseek: ['deepseek-chat', 'deepseek-reasoner'],
// AI/ML API available chat-completion models
// https://api.aimlapi.com/v1/models
aimlapi: [
'openai/gpt-4o',
'gpt-4o-2024-08-06',
'gpt-4o-2024-05-13',
'gpt-4o-mini',
'gpt-4o-mini-2024-07-18',
'chatgpt-4o-latest',
'gpt-4-turbo',
'gpt-4-turbo-2024-04-09',
'gpt-4',
'gpt-4-0125-preview',
'gpt-4-1106-preview',
'gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-1106',
'o1-preview',
'o1-preview-2024-09-12',
'o1-mini',
'o1-mini-2024-09-12',
'o3-mini',
'gpt-4o-audio-preview',
'gpt-4o-mini-audio-preview',
'gpt-4o-search-preview',
'gpt-4o-mini-search-preview',
'openai/gpt-4.1-2025-04-14',
'openai/gpt-4.1-mini-2025-04-14',
'openai/gpt-4.1-nano-2025-04-14',
'openai/o4-mini-2025-04-16',
'openai/o3-2025-04-16',
'o1',
'openai/o3-pro',
'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
'google/gemma-2-27b-it',
'meta-llama/Llama-Vision-Free',
'Qwen/Qwen2-72B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'meta-llama/Llama-3.3-70B-Instruct-Turbo',
'meta-llama/Llama-3.2-3B-Instruct-Turbo',
'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo',
'meta-llama/Llama-Guard-3-11B-Vision-Turbo',
'Qwen/Qwen2.5-7B-Instruct-Turbo',
'Qwen/Qwen2.5-Coder-32B-Instruct',
'meta-llama/Meta-Llama-3-8B-Instruct-Lite',
'meta-llama/Llama-3-8b-chat-hf',
'meta-llama/Llama-3-70b-chat-hf',
'Qwen/Qwen2.5-72B-Instruct-Turbo',
'Qwen/QwQ-32B',
'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
'mistralai/Mistral-7B-Instruct-v0.2',
'meta-llama/LlamaGuard-2-8b',
'mistralai/Mistral-7B-Instruct-v0.1',
'mistralai/Mistral-7B-Instruct-v0.3',
'meta-llama/Meta-Llama-Guard-3-8B',
'meta-llama/llama-4-scout',
'meta-llama/llama-4-maverick',
'Qwen/Qwen3-235B-A22B-fp8-tput',
'claude-3-opus-20240229',
'claude-3-haiku-20240307',
'claude-3-5-sonnet-20240620',
'claude-3-5-sonnet-20241022',
'claude-3-5-haiku-20241022',
'claude-3-7-sonnet-20250219',
'claude-sonnet-4-20250514',
'claude-opus-4-20250514',
'google/gemini-2.0-flash-exp',
'google/gemini-2.0-flash',
'google/gemini-2.5-pro',
'google/gemini-2.5-flash',
'deepseek-chat',
'deepseek-reasoner',
'qwen-max',
'qwen-plus',
'qwen-turbo',
'qwen-max-2025-01-25',
'mistralai/mistral-tiny',
'mistralai/mistral-nemo',
'anthracite-org/magnum-v4-72b',
'nvidia/llama-3.1-nemotron-70b-instruct',
'cohere/command-r-plus',
'mistralai/codestral-2501',
'google/gemma-3-4b-it',
'google/gemma-3-12b-it',
'google/gemma-3-27b-it',
'google/gemini-2.5-flash-lite-preview',
'deepseek/deepseek-prover-v2',
'google/gemma-3n-e4b-it',
'cohere/command-a',
'MiniMax-Text-01',
'abab6.5s-chat',
'minimax/m1',
'bagoodex/bagoodex-search-v1',
'moonshot/kimi-k2-preview',
'perplexity/sonar',
'perplexity/sonar-pro',
'x-ai/grok-4-07-09',
'x-ai/grok-3-beta',
'x-ai/grok-3-mini-beta'
],
// OpenRouter available models
// input_modalities: 'text'
// output_modalities: 'text'
@@ -483,6 +589,8 @@ const getDefaultModel = (provider: string | undefined): string => {
return MODEL_LIST.mistral[0];
case 'deepseek':
return MODEL_LIST.deepseek[0];
case 'aimlapi':
return MODEL_LIST.aimlapi[0];
case 'openrouter':
return MODEL_LIST.openrouter[0];
default:
@@ -675,9 +783,10 @@ export const configValidators = {
'flowise',
'groq',
'deepseek',
'aimlapi',
'openrouter'
].includes(value) || value.startsWith('ollama'),
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise', 'mistral', 'deepseek' or 'openai' (default)`
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise', 'mistral', 'deepseek', 'aimlapi' or 'openai' (default)`
);
return value;
@@ -711,6 +820,14 @@ export const configValidators = {
'Must be true or false'
);
return value;
},
[CONFIG_KEYS.OCO_HOOK_AUTO_UNCOMMENT](value: any) {
validateConfig(
CONFIG_KEYS.OCO_HOOK_AUTO_UNCOMMENT,
typeof value === 'boolean',
'Must be true or false'
);
}
};
@@ -726,9 +843,37 @@ export enum OCO_AI_PROVIDER_ENUM {
MISTRAL = 'mistral',
MLX = 'mlx',
DEEPSEEK = 'deepseek',
AIMLAPI = 'aimlapi',
OPENROUTER = 'openrouter'
}
export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/api-keys',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/keys',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/apikey',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/keys',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/api-keys/',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/api_keys',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/keys',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/keys',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
[OCO_AI_PROVIDER_ENUM.MLX]: null,
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
[OCO_AI_PROVIDER_ENUM.TEST]: null
};
export const RECOMMENDED_MODELS: Record<string, string> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'gpt-4o-mini',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'claude-sonnet-4-20250514',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'gemini-1.5-flash',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'llama3-70b-8192',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'mistral-small-latest',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'deepseek-chat',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'openai/gpt-4o-mini',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini'
}
export type ConfigType = {
[CONFIG_KEYS.OCO_API_KEY]?: string;
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
@@ -747,6 +892,7 @@ export type ConfigType = {
[CONFIG_KEYS.OCO_ONE_LINE_COMMIT]: boolean;
[CONFIG_KEYS.OCO_OMIT_SCOPE]: boolean;
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE]: string;
[CONFIG_KEYS.OCO_HOOK_AUTO_UNCOMMENT]: boolean;
};
export const defaultConfigPath = pathJoin(homedir(), '.opencommit');
@@ -794,7 +940,8 @@ export const DEFAULT_CONFIG = {
OCO_TEST_MOCK_TYPE: 'commit-message',
OCO_WHY: false,
OCO_OMIT_SCOPE: false,
OCO_GITPUSH: true // todo: deprecate
OCO_GITPUSH: true, // todo: deprecate
OCO_HOOK_AUTO_UNCOMMENT: false
};
const initGlobalConfig = (configPath: string = defaultConfigPath) => {
@@ -1046,6 +1193,11 @@ function getConfigKeyDetails(key) {
description: 'Message template placeholder',
values: ['String (must start with $)']
};
case CONFIG_KEYS.OCO_HOOK_AUTO_UNCOMMENT:
return {
description: 'Automatically uncomment the commit message in the hook',
values: ['true', 'false']
};
default:
return {
description: 'String value',

144
src/commands/models.ts Normal file

@@ -0,0 +1,144 @@
import { intro, outro, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
MODEL_LIST,
OCO_AI_PROVIDER_ENUM,
getConfig
} from './config';
import {
fetchModelsForProvider,
clearModelCache,
getCacheInfo,
getCachedModels
} from '../utils/modelCache';
function formatCacheAge(timestamp: number | null): string {
if (!timestamp) return 'never';
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1000 * 60 * 60));
const minutes = Math.floor(ageMs / (1000 * 60));
if (days > 0) {
return `${days} day${days === 1 ? '' : 's'} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
} else if (minutes > 0) {
return `${minutes} minute${minutes === 1 ? '' : 's'} ago`;
}
return 'just now';
}
async function listModels(provider: string, useCache: boolean = true): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const currentModel = config.OCO_MODEL;
// Get cached models or fetch new ones
let models: string[] = [];
if (useCache) {
const cached = getCachedModels(provider);
if (cached) {
models = cached;
}
}
if (models.length === 0) {
// Fallback to hardcoded list
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
models = MODEL_LIST[providerKey] || [];
}
console.log(`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`);
if (models.length === 0) {
console.log(chalk.dim(' No models found'));
} else {
models.forEach((model) => {
const isCurrent = model === currentModel;
const prefix = isCurrent ? chalk.green('* ') : ' ';
const label = isCurrent ? chalk.green(model) : model;
console.log(`${prefix}${label}`);
});
}
console.log('');
}
async function refreshModels(provider: string): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const loadingSpinner = spinner();
loadingSpinner.start(`Fetching models from ${provider}...`);
// Clear cache first
clearModelCache();
try {
const models = await fetchModelsForProvider(provider, apiKey, undefined, true);
loadingSpinner.stop(`${chalk.green('+')} Fetched ${models.length} models`);
// List the models
await listModels(provider, true);
} catch (error) {
loadingSpinner.stop(chalk.red('Failed to fetch models'));
console.error(chalk.red(`Error: ${error instanceof Error ? error.message : 'Unknown error'}`));
}
}
export const modelsCommand = command(
{
name: COMMANDS.models,
help: {
description: 'List and manage cached models for your AI provider'
},
flags: {
refresh: {
type: Boolean,
alias: 'r',
description: 'Clear cache and re-fetch models from the provider',
default: false
},
provider: {
type: String,
alias: 'p',
description: 'Specify provider (defaults to current OCO_AI_PROVIDER)'
}
}
},
async ({ flags }) => {
const config = getConfig();
const provider = flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
intro(chalk.bgCyan(' OpenCommit Models '));
// Show cache info
const cacheInfo = getCacheInfo();
if (cacheInfo.timestamp) {
console.log(
chalk.dim(` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`)
);
if (cacheInfo.providers.length > 0) {
console.log(
chalk.dim(` Cached providers: ${cacheInfo.providers.join(', ')}`)
);
}
} else {
console.log(chalk.dim(' No cached models'));
}
if (flags.refresh) {
await refreshModels(provider);
} else {
await listModels(provider);
}
outro(
`Run ${chalk.cyan('oco models --refresh')} to update the model list`
);
}
);
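`src/utils/modelCache.ts` (providing `fetchModelsForProvider`, `getCachedModels`, `clearModelCache`, and `getCacheInfo`) is imported above but not included in this compare. A minimal sketch of the freshness check implied by the README's "Models are cached for 7 days", with an assumed cache file location and shape:

```typescript
// Hedged sketch of getCachedModels — the real modelCache.ts is not shown here.
import fs from 'fs';
import os from 'os';
import path from 'path';

// Assumed cache location and shape; only the 7-day TTL comes from the README diff above.
const CACHE_PATH = path.join(os.homedir(), '.opencommit-models-cache.json');
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000; // 7 days

interface ModelCache {
  timestamp: number;
  models: Record<string, string[]>; // provider -> model names
}

export function getCachedModels(provider: string): string[] | null {
  if (!fs.existsSync(CACHE_PATH)) return null;

  const cache = JSON.parse(fs.readFileSync(CACHE_PATH, 'utf8')) as ModelCache;
  const isFresh = Date.now() - cache.timestamp < CACHE_TTL_MS;

  return isFresh ? cache.models[provider] ?? null : null;
}
```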

View File

@@ -56,10 +56,14 @@ export const prepareCommitMessageHook = async (
const fileContent = await fs.readFile(messageFilePath);
await fs.writeFile(
messageFilePath,
commitMessage + '\n' + fileContent.toString()
);
const messageWithComment = `# ${commitMessage}\n\n# ---------- [OpenCommit] ---------- #\n# Remove the # above to use this generated commit message.\n# To cancel the commit, just close this window without making any changes.\n\n${fileContent.toString()}`;
const messageWithoutComment = `${commitMessage}\n\n${fileContent.toString()}`;
const message = config.OCO_HOOK_AUTO_UNCOMMENT
? messageWithoutComment
: messageWithComment;
await fs.writeFile(messageFilePath, message);
} catch (error) {
outro(`${chalk.red('✖')} ${error}`);
process.exit(1);

480
src/commands/setup.ts Normal file

@@ -0,0 +1,480 @@
import { intro, outro, select, text, isCancel, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
CONFIG_KEYS,
MODEL_LIST,
OCO_AI_PROVIDER_ENUM,
getConfig,
setGlobalConfig,
getGlobalConfig,
getIsGlobalConfigFileExist,
DEFAULT_CONFIG,
PROVIDER_API_KEY_URLS,
RECOMMENDED_MODELS
} from './config';
import {
fetchModelsForProvider,
fetchOllamaModels,
getCacheInfo
} from '../utils/modelCache';
const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'OpenAI (GPT-4o, GPT-4)',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'Anthropic (Claude Sonnet, Opus)',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: 'Ollama (Free, runs locally)',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'Google Gemini',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'Groq (Fast inference, free tier)',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'Mistral AI',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'DeepSeek',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'OpenRouter (Multiple providers)',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'AI/ML API',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'Azure OpenAI',
[OCO_AI_PROVIDER_ENUM.MLX]: 'MLX (Apple Silicon, local)'
};
const PRIMARY_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OPENAI,
OCO_AI_PROVIDER_ENUM.ANTHROPIC,
OCO_AI_PROVIDER_ENUM.OLLAMA
];
const OTHER_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.GEMINI,
OCO_AI_PROVIDER_ENUM.GROQ,
OCO_AI_PROVIDER_ENUM.MISTRAL,
OCO_AI_PROVIDER_ENUM.DEEPSEEK,
OCO_AI_PROVIDER_ENUM.OPENROUTER,
OCO_AI_PROVIDER_ENUM.AIMLAPI,
OCO_AI_PROVIDER_ENUM.AZURE,
OCO_AI_PROVIDER_ENUM.MLX
];
const NO_API_KEY_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OLLAMA,
OCO_AI_PROVIDER_ENUM.MLX
];
async function selectProvider(): Promise<string | symbol> {
const primaryOptions = PRIMARY_PROVIDERS.map((provider) => ({
value: provider,
label: PROVIDER_DISPLAY_NAMES[provider] || provider
}));
primaryOptions.push({
value: 'other',
label: 'Other providers...'
});
const selection = await select({
message: 'Select your AI provider:',
options: primaryOptions
});
if (isCancel(selection)) return selection;
if (selection === 'other') {
const otherOptions = OTHER_PROVIDERS.map((provider) => ({
value: provider,
label: PROVIDER_DISPLAY_NAMES[provider] || provider
}));
return await select({
message: 'Select provider:',
options: otherOptions
});
}
return selection;
}
async function getApiKey(provider: string): Promise<string | symbol> {
const url = PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];
let message = `Enter your ${provider} API key:`;
if (url) {
message = `Enter your API key:\n${chalk.dim(` Get your key at: ${url}`)}`;
}
return await text({
message,
placeholder: 'sk-...',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'API key is required';
}
return undefined;
}
});
}
function formatCacheAge(timestamp: number | null): string {
if (!timestamp) return '';
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1000 * 60 * 60));
if (days > 0) {
return `${days} day${days === 1 ? '' : 's'} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
}
return 'just now';
}
async function selectModel(
provider: string,
apiKey?: string
): Promise<string | symbol> {
const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
const loadingSpinner = spinner();
loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
let models: string[] = [];
let usedFallback = false;
try {
models = await fetchModelsForProvider(provider, apiKey);
} catch {
// Fall back to hardcoded list
usedFallback = true;
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
models = MODEL_LIST[providerKey] || [];
}
// Check cache info for display
const cacheInfo = getCacheInfo();
const cacheAge = formatCacheAge(cacheInfo.timestamp);
if (usedFallback) {
loadingSpinner.stop(
chalk.yellow('Could not fetch models from API. Using default list.')
);
} else if (cacheAge) {
loadingSpinner.stop(`Models loaded ${chalk.dim(`(cached ${cacheAge})`)}`);
} else {
loadingSpinner.stop('Models loaded');
}
if (models.length === 0) {
// For Ollama/MLX, prompt for manual entry
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
return await text({
message: 'Enter model name (e.g., llama3:8b, mistral):',
placeholder: 'llama3:8b',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
}
// Use default from config
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
return MODEL_LIST[providerKey]?.[0] || 'gpt-4o-mini';
}
// Get recommended model for this provider
const recommended = RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
// Build options with recommended first
const options: Array<{ value: string; label: string }> = [];
if (recommended && models.includes(recommended)) {
options.push({
value: recommended,
label: `${recommended} (Recommended)`
});
}
// Add other models (first 10, excluding recommended)
const otherModels = models
.filter((m) => m !== recommended)
.slice(0, 10);
otherModels.forEach((model) => {
options.push({ value: model, label: model });
});
// Add option to see all or enter custom
if (models.length > 11) {
options.push({ value: '__show_all__', label: 'Show all models...' });
}
options.push({ value: '__custom__', label: 'Enter custom model...' });
const selection = await select({
message: 'Select a model:',
options
});
if (isCancel(selection)) return selection;
if (selection === '__show_all__') {
const allOptions = models.map((model) => ({
value: model,
label: model === recommended ? `${model} (Recommended)` : model
}));
return await select({
message: 'Select a model:',
options: allOptions
});
}
if (selection === '__custom__') {
return await text({
message: 'Enter model name:',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
}
return selection;
}
async function setupOllama(): Promise<{
provider: string;
model: string;
apiUrl: string;
} | null> {
console.log(chalk.cyan('\n Ollama - Free Local AI\n'));
console.log(chalk.dim(' Setup steps:'));
console.log(chalk.dim(' 1. Install: https://ollama.ai/download'));
console.log(chalk.dim(' 2. Pull a model: ollama pull llama3:8b'));
console.log(chalk.dim(' 3. Start server: ollama serve\n'));
// Try to fetch available models
const loadingSpinner = spinner();
loadingSpinner.start('Checking for local Ollama installation...');
const defaultUrl = 'http://localhost:11434';
let ollamaModels: string[] = [];
try {
ollamaModels = await fetchOllamaModels(defaultUrl);
if (ollamaModels.length > 0) {
loadingSpinner.stop(
`${chalk.green('✔')} Found ${ollamaModels.length} local model(s)`
);
} else {
loadingSpinner.stop(
chalk.yellow(
'Ollama is running but no models found. Pull a model first: ollama pull llama3:8b'
)
);
}
} catch {
loadingSpinner.stop(
chalk.yellow(
'Could not connect to Ollama. Make sure it is running: ollama serve'
)
);
}
// Model selection
let model: string | symbol;
if (ollamaModels.length > 0) {
model = await select({
message: 'Select a model:',
options: [
...ollamaModels.map((m) => ({ value: m, label: m })),
{ value: '__custom__', label: 'Enter custom model name...' }
]
});
if (isCancel(model)) return null;
if (model === '__custom__') {
model = await text({
message: 'Enter model name (e.g., llama3:8b, mistral):',
placeholder: 'llama3:8b'
});
}
} else {
model = await text({
message: 'Enter model name (e.g., llama3:8b, mistral):',
placeholder: 'llama3:8b',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
}
if (isCancel(model)) return null;
// API URL (optional)
const apiUrl = await text({
message: 'Ollama URL (press Enter for default):',
placeholder: defaultUrl,
defaultValue: defaultUrl
});
if (isCancel(apiUrl)) return null;
return {
provider: OCO_AI_PROVIDER_ENUM.OLLAMA,
model: model as string,
apiUrl: (apiUrl as string) || defaultUrl
};
}
export async function runSetup(): Promise<boolean> {
intro(chalk.bgCyan(' Welcome to OpenCommit! '));
// Select provider
const provider = await selectProvider();
if (isCancel(provider)) {
outro('Setup cancelled');
return false;
}
let config: Partial<Record<string, any>> = {};
// Handle Ollama specially
if (provider === OCO_AI_PROVIDER_ENUM.OLLAMA) {
const ollamaConfig = await setupOllama();
if (!ollamaConfig) {
outro('Setup cancelled');
return false;
}
config = {
OCO_AI_PROVIDER: ollamaConfig.provider,
OCO_MODEL: ollamaConfig.model,
OCO_API_URL: ollamaConfig.apiUrl,
OCO_API_KEY: 'ollama' // Placeholder
};
} else if (provider === OCO_AI_PROVIDER_ENUM.MLX) {
// MLX setup
console.log(chalk.cyan('\n MLX - Apple Silicon Local AI\n'));
console.log(chalk.dim(' MLX runs locally on Apple Silicon Macs.'));
console.log(chalk.dim(' No API key required.\n'));
const model = await text({
message: 'Enter model name:',
placeholder: 'mlx-community/Llama-3-8B-Instruct-4bit'
});
if (isCancel(model)) {
outro('Setup cancelled');
return false;
}
config = {
OCO_AI_PROVIDER: OCO_AI_PROVIDER_ENUM.MLX,
OCO_MODEL: model,
OCO_API_KEY: 'mlx' // Placeholder
};
} else {
// Standard provider flow: API key then model
const apiKey = await getApiKey(provider as string);
if (isCancel(apiKey)) {
outro('Setup cancelled');
return false;
}
const model = await selectModel(provider as string, apiKey as string);
if (isCancel(model)) {
outro('Setup cancelled');
return false;
}
config = {
OCO_AI_PROVIDER: provider,
OCO_API_KEY: apiKey,
OCO_MODEL: model
};
}
// Save configuration
const existingConfig = getIsGlobalConfigFileExist()
? getGlobalConfig()
: DEFAULT_CONFIG;
const newConfig = {
...existingConfig,
...config
};
setGlobalConfig(newConfig as any);
outro(
`${chalk.green('✔')} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan('oco')} to generate commit messages!`
);
return true;
}
export function isFirstRun(): boolean {
if (!getIsGlobalConfigFileExist()) {
return true;
}
const config = getConfig();
// Check if API key is missing for providers that need it
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
// For Ollama/MLX, check if model is set
return !config.OCO_MODEL;
}
// For other providers, check if API key is set
return !config.OCO_API_KEY;
}
export async function promptForMissingApiKey(): Promise<boolean> {
const config = getConfig();
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
return true; // No API key needed
}
if (config.OCO_API_KEY) {
return true; // Already has key
}
console.log(
chalk.yellow(
`\nAPI key missing for ${provider}. Let's set it up.\n`
)
);
const apiKey = await getApiKey(provider);
if (isCancel(apiKey)) {
return false;
}
const existingConfig = getGlobalConfig();
setGlobalConfig({
...existingConfig,
OCO_API_KEY: apiKey as string
} as any);
console.log(chalk.green('✔') + ' API key saved\n');
return true;
}
export const setupCommand = command(
{
name: COMMANDS.setup,
help: {
description: 'Interactive setup wizard for OpenCommit'
}
},
async () => {
await runSetup();
}
);

39
src/engine/aimlapi.ts Normal file

@@ -0,0 +1,39 @@
import OpenAI from 'openai';
import axios, { AxiosInstance } from 'axios';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { AiEngine, AiEngineConfig } from './Engine';
interface AimlApiConfig extends AiEngineConfig {}
export class AimlApiEngine implements AiEngine {
client: AxiosInstance;
constructor(public config: AimlApiConfig) {
this.client = axios.create({
baseURL: config.baseURL || 'https://api.aimlapi.com/v1/chat/completions',
headers: {
Authorization: `Bearer ${config.apiKey}`,
'HTTP-Referer': 'https://github.com/di-sukharev/opencommit',
'X-Title': 'opencommit',
'Content-Type': 'application/json',
...config.customHeaders
}
});
}
public generateCommitMessage = async (
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | null> => {
try {
const response = await this.client.post('', {
model: this.config.model,
messages
});
const message = response.data.choices?.[0]?.message;
return message?.content ?? null;
} catch (error) {
throw normalizeEngineError(error, 'aimlapi', this.config.model);
}
};
}
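For context, a hedged usage sketch of the engine added above; the real wiring goes through the project's engine-selection logic (not shown in this compare), and only the config fields this engine actually reads are filled in here:

```typescript
// Illustrative only — not how opencommit instantiates engines internally.
import { AimlApiEngine } from './engine/aimlapi';

const engine = new AimlApiEngine({
  apiKey: process.env.OCO_API_KEY ?? '',
  model: 'gpt-4o-mini'
  // AimlApiConfig extends AiEngineConfig, whose full field list is not in this diff.
} as any);

(async () => {
  const commitMessage = await engine.generateCommitMessage([
    { role: 'system', content: 'Generate a conventional commit message for the diff.' },
    { role: 'user', content: '<staged git diff goes here>' }
  ]);
  console.log(commitMessage); // string | null
})();
```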

View File

@@ -3,11 +3,9 @@ import {
MessageCreateParamsNonStreaming,
MessageParam
} from '@anthropic-ai/sdk/resources/messages.mjs';
import { outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -58,22 +56,7 @@ export class AnthropicEngine implements AiEngine {
let content = message;
return removeContentTags(content, 'think');
} catch (error) {
const err = error as Error;
outro(`${chalk.red('✖')} ${err?.message || err}`);
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const anthropicAiError = error.response.data.error;
if (anthropicAiError?.message) outro(anthropicAiError.message);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
}
throw err;
throw normalizeEngineError(error, 'anthropic', this.config.model);
}
};
}

View File

@@ -2,11 +2,9 @@ import {
AzureKeyCredential,
OpenAIClient as AzureOpenAIClient
} from '@azure/openai';
import { outro } from '@clack/prompts';
import axios from 'axios';
import chalk from 'chalk';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -53,28 +51,11 @@ export class AzureEngine implements AiEngine {
if (message?.content === null) {
return undefined;
}
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
outro(`${chalk.red('✖')} ${this.config.model}`);
const err = error as Error;
outro(`${chalk.red('✖')} ${JSON.stringify(error)}`);
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const openAiError = error.response.data.error;
if (openAiError?.message) outro(openAiError.message);
outro(
'For help look into README https://github.com/di-sukharev/opencommit#setup'
);
}
throw err;
throw normalizeEngineError(error, 'azure', this.config.model);
}
};
}

View File

@@ -1,6 +1,6 @@
import axios from 'axios';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { OpenAiEngine, OpenAiConfig } from './openAi';
@@ -45,17 +45,7 @@ export class DeepseekEngine extends OpenAiEngine {
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
const err = error as Error;
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const openAiError = error.response.data.error;
if (openAiError) throw new Error(openAiError.message);
}
throw err;
throw normalizeEngineError(error, 'deepseek', this.config.model);
}
};
}

View File

@@ -1,5 +1,6 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -39,9 +40,8 @@ export class FlowiseEngine implements AiEngine {
const message = response.data;
let content = message?.text;
return removeContentTags(content, 'think');
} catch (err: any) {
const message = err.response?.data?.error ?? err.message;
throw new Error('local model issues. details: ' + message);
} catch (error) {
throw normalizeEngineError(error, 'flowise', this.config.model);
}
}
}

View File

@@ -5,8 +5,8 @@ import {
HarmCategory,
Part
} from '@google/generative-ai';
import axios from 'axios';
import { OpenAI } from 'openai';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -75,16 +75,7 @@ export class GeminiEngine implements AiEngine {
const content = result.response.text();
return removeContentTags(content, 'think');
} catch (error) {
const err = error as Error;
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const geminiError = error.response.data.error;
if (geminiError) throw new Error(geminiError?.message);
}
throw err;
throw normalizeEngineError(error, 'gemini', this.config.model);
}
}
}

View File

@@ -7,4 +7,4 @@ export class GroqEngine extends OpenAiEngine {
config.baseURL = 'https://api.groq.com/openai/v1';
super(config);
}
}
}

View File

@@ -1,6 +1,6 @@
import axios from 'axios';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -23,7 +23,10 @@ export class MistralAiEngine implements AiEngine {
if (!config.baseURL) {
this.client = new Mistral({ apiKey: config.apiKey });
} else {
this.client = new Mistral({ apiKey: config.apiKey, serverURL: config.baseURL });
this.client = new Mistral({
apiKey: config.apiKey,
serverURL: config.baseURL
});
}
}
@@ -50,28 +53,17 @@ export class MistralAiEngine implements AiEngine {
const completion = await this.client.chat.complete(params);
if (!completion.choices)
throw Error('No completion choice available.')
if (!completion.choices) throw Error('No completion choice available.');
const message = completion.choices[0].message;
if (!message || !message.content)
throw Error('No completion choice available.')
throw Error('No completion choice available.');
let content = message.content as string;
return removeContentTags(content, 'think');
} catch (error) {
const err = error as Error;
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const mistralError = error.response.data.error;
if (mistralError) throw new Error(mistralError.message);
}
throw err;
throw normalizeEngineError(error, 'mistral', this.config.model);
}
};
}

View File

@@ -1,47 +1,47 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
interface MLXConfig extends AiEngineConfig {}
export class MLXEngine implements AiEngine {
config: MLXConfig;
client: AxiosInstance;
config: MLXConfig;
client: AxiosInstance;
constructor(config) {
this.config = config;
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:8080/v1/chat/completions',
headers: { 'Content-Type': 'application/json' }
});
constructor(config) {
this.config = config;
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:8080/v1/chat/completions',
headers: { 'Content-Type': 'application/json' }
});
}
async generateCommitMessage(
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | undefined> {
const params = {
messages,
temperature: 0,
top_p: 0.1,
repetition_penalty: 1.5,
stream: false
};
try {
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const choices = response.data.choices;
const message = choices[0].message;
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
throw normalizeEngineError(error, 'mlx', this.config.model);
}
async generateCommitMessage(
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>):
Promise<string | undefined> {
const params = {
messages,
temperature: 0,
top_p: 0.1,
repetition_penalty: 1.5,
stream: false
};
try {
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const choices = response.data.choices;
const message = choices[0].message;
let content = message?.content;
return removeContentTags(content, 'think');
} catch (err: any) {
const message = err.response?.data?.error ?? err.message;
throw new Error(`MLX provider error: ${message}`);
}
}
}
}

View File

@@ -1,5 +1,6 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -11,13 +12,13 @@ export class OllamaEngine implements AiEngine {
constructor(config) {
this.config = config;
// Combine base headers with custom headers
const headers = {
const headers = {
'Content-Type': 'application/json',
...config.customHeaders
...config.customHeaders
};
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
@@ -44,9 +45,8 @@ export class OllamaEngine implements AiEngine {
const { message } = response.data;
let content = message?.content;
return removeContentTags(content, 'think');
} catch (err: any) {
const message = err.response?.data?.error ?? err.message;
throw new Error(`Ollama provider error: ${message}`);
} catch (error) {
throw normalizeEngineError(error, 'ollama', this.config.model);
}
}
}

View File

@@ -1,7 +1,7 @@
import axios from 'axios';
import { OpenAI } from 'openai';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { parseCustomHeaders } from '../utils/engine';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -18,18 +18,18 @@ export class OpenAiEngine implements AiEngine {
const clientOptions: OpenAI.ClientOptions = {
apiKey: config.apiKey
};
if (config.baseURL) {
clientOptions.baseURL = config.baseURL;
}
if (config.customHeaders) {
const headers = parseCustomHeaders(config.customHeaders);
if (Object.keys(headers).length > 0) {
clientOptions.defaultHeaders = headers;
}
}
this.client = new OpenAI(clientOptions);
}
@@ -54,24 +54,14 @@ export class OpenAiEngine implements AiEngine {
this.config.maxTokensInput - this.config.maxTokensOutput
)
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
const completion = await this.client.chat.completions.create(params);
const message = completion.choices[0].message;
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
const err = error as Error;
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const openAiError = error.response.data.error;
if (openAiError) throw new Error(openAiError.message);
}
throw err;
throw normalizeEngineError(error, 'openai', this.config.model);
}
};
}

View File

@@ -1,7 +1,8 @@
import OpenAI from 'openai';
import { AiEngine, AiEngineConfig } from './Engine';
import axios, { AxiosInstance } from 'axios';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
interface OpenRouterConfig extends AiEngineConfig {}
@@ -33,17 +34,7 @@ export class OpenRouterEngine implements AiEngine {
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
const err = error as Error;
if (
axios.isAxiosError<{ error?: { message: string } }>(error) &&
error.response?.status === 401
) {
const openRouterError = error.response.data.error;
if (openRouterError) throw new Error(openRouterError.message);
}
throw err;
throw normalizeEngineError(error, 'openrouter', this.config.model);
}
};
}

View File

@@ -1,7 +1,21 @@
import { select, confirm, isCancel } from '@clack/prompts';
import chalk from 'chalk';
import { OpenAI } from 'openai';
import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config';
import {
DEFAULT_TOKEN_LIMITS,
getConfig,
setGlobalConfig,
getGlobalConfig,
MODEL_LIST,
RECOMMENDED_MODELS
} from './commands/config';
import { getMainCommitPrompt } from './prompts';
import { getEngine } from './utils/engine';
import {
isModelNotFoundError,
getSuggestedModels,
ModelNotFoundError
} from './utils/errors';
import { mergeDiffs } from './utils/mergeDiffs';
import { tokenCount } from './utils/tokenCount';
@@ -14,7 +28,10 @@ const generateCommitMessageChatCompletionPrompt = async (
fullGitMojiSpec: boolean,
context: string
): Promise<Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>> => {
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec, context);
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(
fullGitMojiSpec,
context
);
const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT];
@@ -33,13 +50,106 @@ export enum GenerateCommitMessageErrorEnum {
outputTokensTooHigh = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT} tokens.`
}
async function handleModelNotFoundError(
error: Error,
provider: string,
currentModel: string
): Promise<string | null> {
console.log(
chalk.red(`\n✖ Model '${currentModel}' not found\n`)
);
const suggestedModels = getSuggestedModels(provider, currentModel);
const recommended =
RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
if (suggestedModels.length === 0) {
console.log(
chalk.yellow(
`No alternative models available. Run 'oco setup' to configure a different model.`
)
);
return null;
}
const options: Array<{ value: string; label: string }> = [];
// Add recommended first if available
if (recommended && suggestedModels.includes(recommended)) {
options.push({
value: recommended,
label: `${recommended} (Recommended)`
});
}
// Add other suggestions
suggestedModels
.filter((m) => m !== recommended)
.forEach((model) => {
options.push({ value: model, label: model });
});
options.push({ value: '__custom__', label: 'Enter custom model...' });
const selection = await select({
message: 'Select an alternative model:',
options
});
if (isCancel(selection)) {
return null;
}
let newModel: string;
if (selection === '__custom__') {
const { text } = await import('@clack/prompts');
const customModel = await text({
message: 'Enter model name:',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
if (isCancel(customModel)) {
return null;
}
newModel = customModel as string;
} else {
newModel = selection as string;
}
// Ask if user wants to save as default
const saveAsDefault = await confirm({
message: 'Save as default model?'
});
if (!isCancel(saveAsDefault) && saveAsDefault) {
const existingConfig = getGlobalConfig();
setGlobalConfig({
...existingConfig,
OCO_MODEL: newModel
} as any);
console.log(chalk.green('✔') + ' Model saved as default\n');
}
return newModel;
}
const ADJUSTMENT_FACTOR = 20;
export const generateCommitMessageByDiff = async (
diff: string,
fullGitMojiSpec: boolean = false,
context: string = ""
context: string = '',
retryWithModel?: string
): Promise<string> => {
const currentConfig = getConfig();
const provider = currentConfig.OCO_AI_PROVIDER || 'openai';
const currentModel = retryWithModel || currentConfig.OCO_MODEL;
try {
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(
fullGitMojiSpec,
@@ -75,7 +185,7 @@ export const generateCommitMessageByDiff = async (
const messages = await generateCommitMessageChatCompletionPrompt(
diff,
fullGitMojiSpec,
context,
context
);
const engine = getEngine();
@@ -86,6 +196,32 @@ export const generateCommitMessageByDiff = async (
return commitMessage;
} catch (error) {
// Handle model-not-found errors with interactive recovery
if (isModelNotFoundError(error)) {
const newModel = await handleModelNotFoundError(
error as Error,
provider,
currentModel
);
if (newModel) {
console.log(chalk.cyan(`Retrying with ${newModel}...\n`));
// Retry with the new model by updating config temporarily
const existingConfig = getGlobalConfig();
setGlobalConfig({
...existingConfig,
OCO_MODEL: newModel
} as any);
return generateCommitMessageByDiff(
diff,
fullGitMojiSpec,
context,
newModel
);
}
}
throw error;
}
};
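For reference, the public call shape of generateCommitMessageByDiff is unchanged: the new fourth parameter is optional and is only supplied internally when the model-not-found recovery path retries. A hedged sketch of the normal call path (import paths assumed from the src/ layout seen elsewhere in this diff):

import { generateCommitMessageByDiff } from './generateCommitMessageFromGitDiff';
import { getDiff, getStagedFiles } from './utils/git';

async function generateForStagedChanges(): Promise<string> {
  const stagedFiles = await getStagedFiles();
  const diff = await getDiff({ files: stagedFiles });
  // fullGitMojiSpec and context keep their defaults; retryWithModel is never passed by callers
  return generateCommitMessageByDiff(diff, false, '');
}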

View File

@@ -1,8 +1,8 @@
{
"localLanguage": "한국어",
"commitFix": "fix(server.ts): 포트 변수를 소문자 port에서 대문자 PORT로 변경",
"commitFeat": "피트(server.ts): process.env.PORT 환경 변수 지원 추가",
"commitFeat": "feat(server.ts): process.env.PORT 환경 변수 지원 추가",
"commitDescription": "포트 변수는 이제 PORT로 이름이 지정되어 상수인 PORT와 일관성 있는 이름 규칙을 따릅니다. 환경 변수 지원을 통해 애플리케이션은 이제 process.env.PORT 환경 변수로 지정된 사용 가능한 모든 포트에서 실행할 수 있으므로 더 유연해졌습니다.",
"commitFixOmitScope": "fix: 포트 변수를 소문자 port에서 대문자 PORT로 변경",
"commitFeatOmitScope": "피트: process.env.PORT 환경 변수 지원 추가"
"commitFeatOmitScope": "feat: process.env.PORT 환경 변수 지원 추가"
}

View File

@@ -36,6 +36,19 @@ export const runMigrations = async () => {
const config = getConfig();
if (config.OCO_AI_PROVIDER === OCO_AI_PROVIDER_ENUM.TEST) return;
// skip unhandled providers in migration00
if (
[
OCO_AI_PROVIDER_ENUM.DEEPSEEK,
OCO_AI_PROVIDER_ENUM.GROQ,
OCO_AI_PROVIDER_ENUM.MISTRAL,
OCO_AI_PROVIDER_ENUM.MLX,
OCO_AI_PROVIDER_ENUM.OPENROUTER
].includes(config.OCO_AI_PROVIDER)
) {
return;
}
const completedMigrations = getCompletedMigrations();
let isMigrated = false;

View File

@@ -56,10 +56,11 @@ const llmReadableRules: {
blankline: (key, applicable) =>
`There should ${applicable} be a blank line at the beginning of the ${key}.`,
caseRule: (key, applicable, value: string | Array<string>) =>
`The ${key} should ${applicable} be in ${Array.isArray(value)
? `one of the following case:
`The ${key} should ${applicable} be in ${
Array.isArray(value)
? `one of the following case:
- ${value.join('\n - ')}.`
: `${value} case.`
: `${value} case.`
}`,
emptyRule: (key, applicable) => `The ${key} should ${applicable} be empty.`,
enumRule: (key, applicable, value: string | Array<string>) =>
@@ -67,17 +68,18 @@ const llmReadableRules: {
- ${Array.isArray(value) ? value.join('\n - ') : value}.`,
enumTypeRule: (key, applicable, value: string | Array<string>, prompt) =>
`The ${key} should ${applicable} be one of the following values:
- ${Array.isArray(value)
- ${
Array.isArray(value)
? value
.map((v) => {
const description = getTypeRuleExtraDescription(v, prompt);
if (description) {
return `${v} (${description})`;
} else return v;
})
.join('\n - ')
.map((v) => {
const description = getTypeRuleExtraDescription(v, prompt);
if (description) {
return `${v} (${description})`;
} else return v;
})
.join('\n - ')
: value
}.`,
}.`,
fullStopRule: (key, applicable, value: string) =>
`The ${key} should ${applicable} end with '${value}'.`,
maxLengthRule: (key, applicable, value: string) =>
@@ -214,16 +216,20 @@ const STRUCTURE_OF_COMMIT = config.OCO_OMIT_SCOPE
const GEN_COMMITLINT_CONSISTENCY_PROMPT = (
prompts: string[]
): OpenAI.Chat.Completions.ChatCompletionMessageParam[] => [
{
role: 'system',
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages for two different changes in a single codebase and output them in the provided JSON format: one for a bug fix and another for a new feature.
{
role: 'system',
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages for two different changes in a single codebase and output them in the provided JSON format: one for a bug fix and another for a new feature.
Here are the specific requirements and conventions that should be strictly followed:
Commit Message Conventions:
- The commit message consists of three parts: Header, Body, and Footer.
- Header:
- Format: ${config.OCO_OMIT_SCOPE ? '`<type>: <subject>`' : '`<type>(<scope>): <subject>`'}
- Format: ${
config.OCO_OMIT_SCOPE
? '`<type>: <subject>`'
: '`<type>(<scope>): <subject>`'
}
- ${prompts.join('\n- ')}
JSON Output Format:
@@ -246,9 +252,9 @@ Additional Details:
- Allowing the server to listen on a port specified through the environment variable is considered a new feature.
Example Git Diff is to follow:`
},
INIT_DIFF_PROMPT
];
},
INIT_DIFF_PROMPT
];
/**
* Prompt to have LLM generate a message using @commitlint rules.
@@ -262,25 +268,30 @@ const INIT_MAIN_PROMPT = (
prompts: string[]
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
role: 'system',
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages in the given @commitlint convention and explain WHAT were the changes ${config.OCO_WHY ? 'and WHY the changes were done' : ''
}. I'll send you an output of 'git diff --staged' command, and you convert it into a commit message.
${config.OCO_EMOJI
? 'Use GitMoji convention to preface the commit.'
: 'Do not preface the commit with anything.'
}
${config.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message."
}
content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages in the given @commitlint convention and explain WHAT were the changes ${
config.OCO_WHY ? 'and WHY the changes were done' : ''
}. I'll send you an output of 'git diff --staged' command, and you convert it into a commit message.
${
config.OCO_EMOJI
? 'Use GitMoji convention to preface the commit.'
: 'Do not preface the commit with anything.'
}
${
config.OCO_DESCRIPTION
? 'Add a short description of WHY the changes are done after the commit message. Don\'t start it with "This commit", just describe the changes.'
: "Don't add any descriptions to the commit, only commit message."
}
Use the present tense. Use ${language} to answer.
${config.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: ''
}
${config.OCO_OMIT_SCOPE
? 'Do not include a scope in the commit message format. Use the format: <type>: <subject>'
: ''
}
${
config.OCO_ONE_LINE_COMMIT
? 'Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change.'
: ''
}
${
config.OCO_OMIT_SCOPE
? 'Do not include a scope in the commit message format. Use the format: <type>: <subject>'
: ''
}
You will strictly follow the following conventions to generate the content of the commit message:
- ${prompts.join('\n- ')}

View File

@@ -21,7 +21,7 @@ export const getJSONBlock = (input: string): string => {
if (jsonIndex > -1) {
input = input.slice(jsonIndex + 8);
const endJsonIndex = input.search('```');
input = input.slice(0, endJsonIndex);
input = input.slice(0, endJsonIndex);
}
return input;
};

View File

@@ -155,9 +155,9 @@ const INIT_MAIN_PROMPT = (
});
export const INIT_DIFF_PROMPT: OpenAI.Chat.Completions.ChatCompletionMessageParam =
{
role: 'user',
content: `diff --git a/src/server.ts b/src/server.ts
{
role: 'user',
content: `diff --git a/src/server.ts b/src/server.ts
index ad4db42..f3b18a9 100644
--- a/src/server.ts
+++ b/src/server.ts
@@ -181,7 +181,7 @@ export const INIT_DIFF_PROMPT: OpenAI.Chat.Completions.ChatCompletionMessagePara
+app.listen(process.env.PORT || PORT, () => {
+ console.log(\`Server listening on port \${PORT}\`);
});`
};
};
const COMMIT_TYPES = {
fix: '🐛',
@@ -193,19 +193,19 @@ const generateCommitString = (
message: string
): string => {
const cleanMessage = removeConventionalCommitWord(message);
return config.OCO_EMOJI
? `${COMMIT_TYPES[type]} ${cleanMessage}`
: message;
return config.OCO_EMOJI ? `${COMMIT_TYPES[type]} ${cleanMessage}` : message;
};
const getConsistencyContent = (translation: ConsistencyPrompt) => {
const fixMessage = config.OCO_OMIT_SCOPE && translation.commitFixOmitScope
? translation.commitFixOmitScope
: translation.commitFix;
const fixMessage =
config.OCO_OMIT_SCOPE && translation.commitFixOmitScope
? translation.commitFixOmitScope
: translation.commitFix;
const featMessage = config.OCO_OMIT_SCOPE && translation.commitFeatOmitScope
? translation.commitFeatOmitScope
: translation.commitFeat;
const featMessage =
config.OCO_OMIT_SCOPE && translation.commitFeatOmitScope
? translation.commitFeatOmitScope
: translation.commitFeat;
const fix = generateCommitString('fix', fixMessage);
const feat = config.OCO_ONE_LINE_COMMIT
@@ -250,7 +250,7 @@ export const getMainCommitPrompt = async (
INIT_DIFF_PROMPT,
INIT_CONSISTENCY_PROMPT(
commitLintConfig.consistency[
translation.localLanguage
translation.localLanguage
] as ConsistencyPrompt
)
];

View File

@@ -11,6 +11,7 @@ import { TestAi, TestMockType } from '../engine/testAi';
import { GroqEngine } from '../engine/groq';
import { MLXEngine } from '../engine/mlx';
import { DeepseekEngine } from '../engine/deepseek';
import { AimlApiEngine } from '../engine/aimlapi';
import { OpenRouterEngine } from '../engine/openrouter';
export function parseCustomHeaders(headers: any): Record<string, string> {
@@ -81,6 +82,9 @@ export function getEngine(): AiEngine {
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
return new DeepseekEngine(DEFAULT_CONFIG);
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
return new AimlApiEngine(DEFAULT_CONFIG);
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
return new OpenRouterEngine(DEFAULT_CONFIG);
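The only functional change here is the new AIMLAPI case in the provider switch. A short sketch of what that means for callers, assuming OCO_AI_PROVIDER is set to 'aimlapi' in the user's configuration:

import { getEngine } from '../utils/engine';

// getEngine() builds its engine from the user's configuration (the DEFAULT_CONFIG
// referenced above) and now resolves 'aimlapi' to AimlApiEngine; callers keep
// using the shared AiEngine interface either way.
const engine = getEngine();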

View File

@@ -0,0 +1,205 @@
import axios from 'axios';
import {
AuthenticationError,
InsufficientCreditsError,
ModelNotFoundError,
RateLimitError,
ServiceUnavailableError
} from './errors';
/**
* Extracts HTTP status code from various error types
*/
function getStatusCode(error: unknown): number | null {
// Direct status property (common in API SDKs)
if (typeof (error as any)?.status === 'number') {
return (error as any).status;
}
// Axios-style errors
if (axios.isAxiosError(error)) {
return error.response?.status ?? null;
}
// Response object with status
if (typeof (error as any)?.response?.status === 'number') {
return (error as any).response.status;
}
return null;
}
/**
* Extracts retry-after value from error headers (for rate limiting)
*/
function getRetryAfter(error: unknown): number | undefined {
const headers = (error as any)?.response?.headers;
if (headers) {
const retryAfter = headers['retry-after'] || headers['Retry-After'];
if (retryAfter) {
const seconds = parseInt(retryAfter, 10);
if (!isNaN(seconds)) {
return seconds;
}
}
}
return undefined;
}
/**
* Extracts the error message from various error structures
*/
function extractErrorMessage(error: unknown): string {
if (error instanceof Error) {
return error.message;
}
// API error response structures
const apiError = (error as any)?.response?.data?.error;
if (apiError) {
if (typeof apiError === 'string') {
return apiError;
}
if (apiError.message) {
return apiError.message;
}
}
// Direct error data
const errorData = (error as any)?.error;
if (errorData) {
if (typeof errorData === 'string') {
return errorData;
}
if (errorData.message) {
return errorData.message;
}
}
// Fallback
if (typeof error === 'string') {
return error;
}
return 'An unknown error occurred';
}
/**
* Checks if the error message indicates a model not found error
*/
function isModelNotFoundMessage(message: string): boolean {
const lowerMessage = message.toLowerCase();
return (
(lowerMessage.includes('model') &&
(lowerMessage.includes('not found') ||
lowerMessage.includes('does not exist') ||
lowerMessage.includes('invalid') ||
lowerMessage.includes('pull'))) ||
lowerMessage.includes('does_not_exist')
);
}
/**
* Checks if the error message indicates insufficient credits
*/
function isInsufficientCreditsMessage(message: string): boolean {
const lowerMessage = message.toLowerCase();
return (
lowerMessage.includes('insufficient') ||
lowerMessage.includes('credit') ||
lowerMessage.includes('quota') ||
lowerMessage.includes('balance too low') ||
lowerMessage.includes('billing') ||
lowerMessage.includes('payment required') ||
lowerMessage.includes('exceeded')
);
}
/**
* Normalizes raw API errors into typed error classes.
* This provides consistent error handling across all engine implementations.
*
* @param error - The raw error from the API call
* @param provider - The AI provider name (e.g., 'openai', 'anthropic')
* @param model - The model being used
* @returns A typed Error instance
*/
export function normalizeEngineError(
error: unknown,
provider: string,
model: string
): Error {
// If it's already one of our custom errors, return as-is
if (
error instanceof ModelNotFoundError ||
error instanceof AuthenticationError ||
error instanceof InsufficientCreditsError ||
error instanceof RateLimitError ||
error instanceof ServiceUnavailableError
) {
return error;
}
const statusCode = getStatusCode(error);
const message = extractErrorMessage(error);
// Handle based on HTTP status codes
switch (statusCode) {
case 401:
return new AuthenticationError(provider, message);
case 402:
return new InsufficientCreditsError(provider, message);
case 404:
// Could be model not found or endpoint not found
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
// Return generic error for other 404s
return error instanceof Error ? error : new Error(message);
case 429:
const retryAfter = getRetryAfter(error);
return new RateLimitError(provider, retryAfter, message);
case 500:
case 502:
case 503:
case 504:
return new ServiceUnavailableError(provider, statusCode, message);
}
// Handle based on error message content
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
if (isInsufficientCreditsMessage(message)) {
return new InsufficientCreditsError(provider, message);
}
// Check for rate limit patterns in message
const lowerMessage = message.toLowerCase();
if (
lowerMessage.includes('rate limit') ||
lowerMessage.includes('rate_limit') ||
lowerMessage.includes('too many requests')
) {
return new RateLimitError(provider, undefined, message);
}
// Check for auth patterns in message
if (
lowerMessage.includes('unauthorized') ||
lowerMessage.includes('api key') ||
lowerMessage.includes('apikey') ||
lowerMessage.includes('authentication') ||
lowerMessage.includes('invalid_api_key')
) {
return new AuthenticationError(provider, message);
}
// Return original error or wrap in Error if needed
return error instanceof Error ? error : new Error(message);
}
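A quick illustration of the mapping normalizeEngineError applies for every engine; the error object below is a hand-rolled Axios-like shape, not real provider output:

import { normalizeEngineError } from './engineErrorHandler';
import { RateLimitError } from './errors';

const fakeError = {
  response: {
    status: 429,
    headers: { 'retry-after': '30' },
    data: { error: { message: 'Too many requests' } }
  }
};

const normalized = normalizeEngineError(fakeError, 'openai', 'gpt-4o-mini');
// normalized instanceof RateLimitError === true
// normalized.message === 'Too many requests'
// (normalized as RateLimitError).retryAfter === 30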

src/utils/errors.ts (new file, 471 lines)
View File

@@ -0,0 +1,471 @@
import chalk from 'chalk';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
// Provider billing/help URLs for common errors
export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/settings/organization/billing',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/plan',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/billing/',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/usage',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/credits',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/billing',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
[OCO_AI_PROVIDER_ENUM.MLX]: null,
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
[OCO_AI_PROVIDER_ENUM.TEST]: null
};
// Error type for insufficient credits/quota
export class InsufficientCreditsError extends Error {
public readonly provider: string;
constructor(provider: string, message?: string) {
super(message || `Insufficient credits or quota for provider '${provider}'`);
this.name = 'InsufficientCreditsError';
this.provider = provider;
}
}
// Error type for rate limiting (429 errors)
export class RateLimitError extends Error {
public readonly provider: string;
public readonly retryAfter?: number;
constructor(provider: string, retryAfter?: number, message?: string) {
super(message || `Rate limit exceeded for provider '${provider}'`);
this.name = 'RateLimitError';
this.provider = provider;
this.retryAfter = retryAfter;
}
}
// Error type for service unavailable (5xx errors)
export class ServiceUnavailableError extends Error {
public readonly provider: string;
public readonly statusCode: number;
constructor(provider: string, statusCode: number = 503, message?: string) {
super(message || `Service unavailable for provider '${provider}'`);
this.name = 'ServiceUnavailableError';
this.provider = provider;
this.statusCode = statusCode;
}
}
// Error type for authentication failures
export class AuthenticationError extends Error {
public readonly provider: string;
constructor(provider: string, message?: string) {
super(message || `Authentication failed for provider '${provider}'`);
this.name = 'AuthenticationError';
this.provider = provider;
}
}
export class ModelNotFoundError extends Error {
public readonly modelName: string;
public readonly provider: string;
public readonly statusCode: number;
constructor(modelName: string, provider: string, statusCode: number = 404) {
super(`Model '${modelName}' not found for provider '${provider}'`);
this.name = 'ModelNotFoundError';
this.modelName = modelName;
this.provider = provider;
this.statusCode = statusCode;
}
}
export class ApiKeyMissingError extends Error {
public readonly provider: string;
constructor(provider: string) {
super(`API key is missing for provider '${provider}'`);
this.name = 'ApiKeyMissingError';
this.provider = provider;
}
}
export function isModelNotFoundError(error: unknown): boolean {
if (error instanceof ModelNotFoundError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// OpenAI error patterns
if (
message.includes('model') &&
(message.includes('not found') ||
message.includes('does not exist') ||
message.includes('invalid model'))
) {
return true;
}
// Anthropic error patterns
if (
message.includes('model') &&
(message.includes('not found') || message.includes('invalid'))
) {
return true;
}
// Check for 404 status in axios/fetch errors
if (
'status' in (error as any) &&
(error as any).status === 404 &&
message.includes('model')
) {
return true;
}
// Check for response status
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 404) {
return true;
}
}
}
return false;
}
export function isApiKeyError(error: unknown): boolean {
if (error instanceof ApiKeyMissingError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common API key error patterns
if (
message.includes('api key') ||
message.includes('apikey') ||
message.includes('authentication') ||
message.includes('unauthorized') ||
message.includes('invalid_api_key') ||
message.includes('incorrect api key')
) {
return true;
}
// Check for 401 status
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 401) {
return true;
}
}
}
return false;
}
export function getSuggestedModels(
provider: string,
failedModel: string
): string[] {
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
const models = MODEL_LIST[providerKey];
if (!models || !Array.isArray(models)) {
return [];
}
// Return first 5 models as suggestions, excluding the failed one
return models.filter((m) => m !== failedModel).slice(0, 5);
}
export function getRecommendedModel(provider: string): string | null {
switch (provider.toLowerCase()) {
case OCO_AI_PROVIDER_ENUM.OPENAI:
return 'gpt-4o-mini';
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
return 'claude-sonnet-4-20250514';
case OCO_AI_PROVIDER_ENUM.GEMINI:
return 'gemini-1.5-flash';
case OCO_AI_PROVIDER_ENUM.GROQ:
return 'llama3-70b-8192';
case OCO_AI_PROVIDER_ENUM.MISTRAL:
return 'mistral-small-latest';
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
return 'deepseek-chat';
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
return 'openai/gpt-4o-mini';
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
return 'gpt-4o-mini';
default:
return null;
}
}
export function formatErrorWithRecovery(
error: Error,
provider: string,
model: string
): string {
const suggestions = getSuggestedModels(provider, model);
const recommended = getRecommendedModel(provider);
let message = `\n${error.message}\n`;
if (suggestions.length > 0) {
message += '\nSuggested alternatives:\n';
suggestions.forEach((m, i) => {
const isRecommended = m === recommended;
message += ` ${i + 1}. ${m}${isRecommended ? ' (Recommended)' : ''}\n`;
});
}
message += '\nTo fix this, run: oco config set OCO_MODEL=<model-name>\n';
message += 'Or run: oco setup\n';
return message;
}
// Detect insufficient credits/quota errors from various providers
export function isInsufficientCreditsError(error: unknown): boolean {
if (error instanceof InsufficientCreditsError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common patterns for insufficient credits/quota
if (
message.includes('insufficient') ||
message.includes('credit') ||
message.includes('quota') ||
message.includes('balance') ||
message.includes('billing') ||
message.includes('payment') ||
message.includes('exceeded') ||
message.includes('limit reached') ||
message.includes('no remaining')
) {
return true;
}
// Check for 402 Payment Required status
if ('status' in (error as any) && (error as any).status === 402) {
return true;
}
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 402) {
return true;
}
}
}
return false;
}
// Detect rate limit errors (429)
export function isRateLimitError(error: unknown): boolean {
if (error instanceof RateLimitError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common patterns for rate limiting
if (
message.includes('rate limit') ||
message.includes('rate_limit') ||
message.includes('too many requests') ||
message.includes('throttle')
) {
return true;
}
// Check for 429 status
if ('status' in (error as any) && (error as any).status === 429) {
return true;
}
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 429) {
return true;
}
}
}
return false;
}
// Detect service unavailable errors (5xx)
export function isServiceUnavailableError(error: unknown): boolean {
if (error instanceof ServiceUnavailableError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common patterns for service unavailable
if (
message.includes('service unavailable') ||
message.includes('server error') ||
message.includes('internal error') ||
message.includes('temporarily unavailable') ||
message.includes('overloaded')
) {
return true;
}
// Check for 5xx status
const status = (error as any).status || (error as any).response?.status;
if (status && status >= 500 && status < 600) {
return true;
}
}
return false;
}
// User-friendly formatted error structure
export interface FormattedError {
title: string;
message: string;
helpUrl: string | null;
suggestion: string | null;
}
// Format an error into a user-friendly structure
export function formatUserFriendlyError(error: unknown, provider: string): FormattedError {
const billingUrl = PROVIDER_BILLING_URLS[provider] || null;
// Handle our custom error types first
if (error instanceof InsufficientCreditsError) {
return {
title: 'Insufficient Credits',
message: `Your ${provider} account has insufficient credits or quota.`,
helpUrl: billingUrl,
suggestion: 'Add credits to your account to continue using the service.'
};
}
if (error instanceof RateLimitError) {
const retryMsg = error.retryAfter
? `Please wait ${error.retryAfter} seconds before retrying.`
: 'Please wait a moment before retrying.';
return {
title: 'Rate Limit Exceeded',
message: `You've made too many requests to ${provider}.`,
helpUrl: billingUrl,
suggestion: retryMsg
};
}
if (error instanceof ServiceUnavailableError) {
return {
title: 'Service Unavailable',
message: `The ${provider} service is temporarily unavailable.`,
helpUrl: null,
suggestion: 'Please try again in a few moments.'
};
}
if (error instanceof AuthenticationError) {
return {
title: 'Authentication Failed',
message: `Your ${provider} API key is invalid or expired.`,
helpUrl: billingUrl,
suggestion: 'Run `oco setup` to configure a valid API key.'
};
}
if (error instanceof ModelNotFoundError) {
return {
title: 'Model Not Found',
message: `The model '${error.modelName}' is not available for ${provider}.`,
helpUrl: null,
suggestion: 'Run `oco setup` to select a valid model.'
};
}
// Detect error type from raw errors
if (isInsufficientCreditsError(error)) {
return {
title: 'Insufficient Credits',
message: `Your ${provider} account has insufficient credits or quota.`,
helpUrl: billingUrl,
suggestion: 'Add credits to your account to continue using the service.'
};
}
if (isRateLimitError(error)) {
return {
title: 'Rate Limit Exceeded',
message: `You've made too many requests to ${provider}.`,
helpUrl: billingUrl,
suggestion: 'Please wait a moment before retrying.'
};
}
if (isServiceUnavailableError(error)) {
return {
title: 'Service Unavailable',
message: `The ${provider} service is temporarily unavailable.`,
helpUrl: null,
suggestion: 'Please try again in a few moments.'
};
}
if (isApiKeyError(error)) {
return {
title: 'Authentication Failed',
message: `Your ${provider} API key is invalid or expired.`,
helpUrl: billingUrl,
suggestion: 'Run `oco setup` to configure a valid API key.'
};
}
if (isModelNotFoundError(error)) {
const model = (error as any).modelName || (error as any).model || 'unknown';
return {
title: 'Model Not Found',
message: `The model '${model}' is not available for ${provider}.`,
helpUrl: null,
suggestion: 'Run `oco setup` to select a valid model.'
};
}
// Default: generic error
const errorMessage = error instanceof Error ? error.message : String(error);
return {
title: 'Error',
message: errorMessage,
helpUrl: null,
suggestion: 'Run `oco setup` to reconfigure or check your settings.'
};
}
// Print a formatted error as a chalk-styled string
export function printFormattedError(formatted: FormattedError): string {
let output = `\n${chalk.red('✖')} ${chalk.bold.red(formatted.title)}\n`;
output += ` ${formatted.message}\n`;
if (formatted.helpUrl) {
output += `\n ${chalk.cyan('Help:')} ${chalk.underline(formatted.helpUrl)}\n`;
}
if (formatted.suggestion) {
output += `\n ${chalk.yellow('Suggestion:')} ${formatted.suggestion}\n`;
}
return output;
}
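Putting the detection, formatting, and printing helpers together, a command's error path could look roughly like this (commitStagedChanges is a hypothetical placeholder for whatever the command was doing; the exact wiring in the CLI may differ):

import { formatUserFriendlyError, printFormattedError } from '../utils/errors';
import { getConfig } from '../commands/config';

async function runWithFriendlyErrors(commitStagedChanges: () => Promise<void>) {
  try {
    await commitStagedChanges();
  } catch (error) {
    const provider = getConfig().OCO_AI_PROVIDER || 'openai';
    const formatted = formatUserFriendlyError(error, provider);
    console.error(printFormattedError(formatted));
    process.exit(1);
  }
}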

View File

@@ -1,7 +1,7 @@
import { execa } from 'execa';
import { readFileSync } from 'fs';
import ignore, { Ignore } from 'ignore';
import { join } from 'path';
import { outro, spinner } from '@clack/prompts';
export const assertGitRepo = async () => {
@@ -16,41 +16,44 @@ export const assertGitRepo = async () => {
// (file) => `:(exclude)${file}`
// );
export const getOpenCommitIgnore = (): Ignore => {
export const getOpenCommitIgnore = async (): Promise<Ignore> => {
const gitDir = await getGitDir();
const ig = ignore();
try {
ig.add(readFileSync('.opencommitignore').toString().split('\n'));
ig.add(
readFileSync(join(gitDir, '.opencommitignore')).toString().split('\n')
);
} catch (e) {}
return ig;
};
export const getCoreHooksPath = async (): Promise<string> => {
const { stdout } = await execa('git', ['config', 'core.hooksPath']);
const gitDir = await getGitDir();
const { stdout } = await execa('git', ['config', 'core.hooksPath'], {
cwd: gitDir
});
return stdout;
};
export const getStagedFiles = async (): Promise<string[]> => {
const { stdout: gitDir } = await execa('git', [
'rev-parse',
'--show-toplevel'
]);
const gitDir = await getGitDir();
const { stdout: files } = await execa('git', [
'diff',
'--name-only',
'--cached',
'--relative',
gitDir
]);
const { stdout: files } = await execa(
'git',
['diff', '--name-only', '--cached', '--relative'],
{ cwd: gitDir }
);
if (!files) return [];
const filesList = files.split('\n');
const ig = getOpenCommitIgnore();
const ig = await getOpenCommitIgnore();
const allowedFiles = filesList.filter((file) => !ig.ignores(file));
if (!allowedFiles) return [];
@@ -59,12 +62,17 @@ export const getStagedFiles = async (): Promise<string[]> => {
};
export const getChangedFiles = async (): Promise<string[]> => {
const { stdout: modified } = await execa('git', ['ls-files', '--modified']);
const { stdout: others } = await execa('git', [
'ls-files',
'--others',
'--exclude-standard'
]);
const gitDir = await getGitDir();
const { stdout: modified } = await execa('git', ['ls-files', '--modified'], {
cwd: gitDir
});
const { stdout: others } = await execa(
'git',
['ls-files', '--others', '--exclude-standard'],
{ cwd: gitDir }
);
const files = [...modified.split('\n'), ...others.split('\n')].filter(
(file) => !!file
@@ -74,16 +82,20 @@ export const getChangedFiles = async (): Promise<string[]> => {
};
export const gitAdd = async ({ files }: { files: string[] }) => {
const gitDir = await getGitDir();
const gitAddSpinner = spinner();
gitAddSpinner.start('Adding files to commit');
await execa('git', ['add', ...files]);
await execa('git', ['add', ...files], { cwd: gitDir });
gitAddSpinner.stop(`Staged ${files.length} files`);
};
export const getDiff = async ({ files }: { files: string[] }) => {
const gitDir = await getGitDir();
const lockFiles = files.filter(
(file) =>
file.includes('.lock') ||
@@ -108,12 +120,20 @@ export const getDiff = async ({ files }: { files: string[] }) => {
(file) => !file.includes('.lock') && !file.includes('-lock.')
);
const { stdout: diff } = await execa('git', [
'diff',
'--staged',
'--',
...filesWithoutLocks
]);
const { stdout: diff } = await execa(
'git',
['diff', '--staged', '--', ...filesWithoutLocks],
{ cwd: gitDir }
);
return diff;
};
export const getGitDir = async (): Promise<string> => {
const { stdout: gitDir } = await execa('git', [
'rev-parse',
'--show-toplevel'
]);
return gitDir;
};
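The common thread in this file is that every git invocation now passes { cwd: gitDir }, with gitDir taken from git rev-parse --show-toplevel, so OpenCommit behaves the same when invoked from a subdirectory of the repository. A small sketch of the pattern (the git status call is purely illustrative):

import { execa } from 'execa';

async function demoRepoRootPinning(): Promise<string> {
  const { stdout: gitDir } = await execa('git', ['rev-parse', '--show-toplevel']);
  // any further git command is pinned to the repository root:
  const { stdout: status } = await execa('git', ['status', '--short'], { cwd: gitDir });
  return status;
}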

src/utils/modelCache.ts (new file, 332 lines)
View File

@@ -0,0 +1,332 @@
import { existsSync, readFileSync, writeFileSync } from 'fs';
import { homedir } from 'os';
import { join as pathJoin } from 'path';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
const MODEL_CACHE_PATH = pathJoin(homedir(), '.opencommit-models.json');
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000; // 7 days
interface ModelCache {
timestamp: number;
models: Record<string, string[]>;
}
function readCache(): ModelCache | null {
try {
if (!existsSync(MODEL_CACHE_PATH)) {
return null;
}
const data = readFileSync(MODEL_CACHE_PATH, 'utf8');
return JSON.parse(data);
} catch {
return null;
}
}
function writeCache(models: Record<string, string[]>): void {
try {
const cache: ModelCache = {
timestamp: Date.now(),
models
};
writeFileSync(MODEL_CACHE_PATH, JSON.stringify(cache, null, 2), 'utf8');
} catch {
// Silently fail if we can't write cache
}
}
function isCacheValid(cache: ModelCache | null): boolean {
if (!cache) return false;
return Date.now() - cache.timestamp < CACHE_TTL_MS;
}
export async function fetchOpenAIModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.openai.com/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openai;
}
const data = await response.json();
const models = data.data
.map((m: { id: string }) => m.id)
.filter(
(id: string) =>
id.startsWith('gpt-') ||
id.startsWith('o1') ||
id.startsWith('o3') ||
id.startsWith('o4')
)
.sort();
return models.length > 0 ? models : MODEL_LIST.openai;
} catch {
return MODEL_LIST.openai;
}
}
export async function fetchOllamaModels(
baseUrl: string = 'http://localhost:11434'
): Promise<string[]> {
try {
const response = await fetch(`${baseUrl}/api/tags`);
if (!response.ok) {
return [];
}
const data = await response.json();
return data.models?.map((m: { name: string }) => m.name) || [];
} catch {
return [];
}
}
export async function fetchAnthropicModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.anthropic.com/v1/models', {
headers: {
'x-api-key': apiKey,
'anthropic-version': '2023-06-01'
}
});
if (!response.ok) {
return MODEL_LIST.anthropic;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.filter((id: string) => id.startsWith('claude-'))
.sort();
return models && models.length > 0 ? models : MODEL_LIST.anthropic;
} catch {
return MODEL_LIST.anthropic;
}
}
export async function fetchMistralModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.mistral.ai/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.mistral;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.mistral;
} catch {
return MODEL_LIST.mistral;
}
}
export async function fetchGroqModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.groq.com/openai/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.groq;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.groq;
} catch {
return MODEL_LIST.groq;
}
}
export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://openrouter.ai/api/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openrouter;
}
const data = await response.json();
// Filter to text-capable models only (exclude image/audio models)
const models = data.data
?.filter((m: { id: string; context_length?: number }) =>
m.context_length && m.context_length > 0
)
.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.openrouter;
} catch {
return MODEL_LIST.openrouter;
}
}
export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.deepseek.com/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.deepseek;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
} catch {
return MODEL_LIST.deepseek;
}
}
export async function fetchModelsForProvider(
provider: string,
apiKey?: string,
baseUrl?: string,
forceRefresh: boolean = false
): Promise<string[]> {
const cache = readCache();
// Return cached models if valid (unless force refresh)
if (!forceRefresh && isCacheValid(cache) && cache!.models[provider]) {
return cache!.models[provider];
}
let models: string[] = [];
switch (provider.toLowerCase()) {
case OCO_AI_PROVIDER_ENUM.OPENAI:
if (apiKey) {
models = await fetchOpenAIModels(apiKey);
} else {
models = MODEL_LIST.openai;
}
break;
case OCO_AI_PROVIDER_ENUM.OLLAMA:
models = await fetchOllamaModels(baseUrl);
break;
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
if (apiKey) {
models = await fetchAnthropicModels(apiKey);
} else {
models = MODEL_LIST.anthropic;
}
break;
case OCO_AI_PROVIDER_ENUM.GEMINI:
// Google's API doesn't easily list generative models, use hardcoded list
models = MODEL_LIST.gemini;
break;
case OCO_AI_PROVIDER_ENUM.GROQ:
if (apiKey) {
models = await fetchGroqModels(apiKey);
} else {
models = MODEL_LIST.groq;
}
break;
case OCO_AI_PROVIDER_ENUM.MISTRAL:
if (apiKey) {
models = await fetchMistralModels(apiKey);
} else {
models = MODEL_LIST.mistral;
}
break;
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
if (apiKey) {
models = await fetchDeepSeekModels(apiKey);
} else {
models = MODEL_LIST.deepseek;
}
break;
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
models = MODEL_LIST.aimlapi;
break;
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
if (apiKey) {
models = await fetchOpenRouterModels(apiKey);
} else {
models = MODEL_LIST.openrouter;
}
break;
default:
models = MODEL_LIST.openai;
}
// Update cache
const existingCache = cache?.models || {};
existingCache[provider] = models;
writeCache(existingCache);
return models;
}
export function getModelsForProvider(provider: string): string[] {
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
return MODEL_LIST[providerKey] || MODEL_LIST.openai;
}
export function clearModelCache(): void {
try {
if (existsSync(MODEL_CACHE_PATH)) {
writeFileSync(MODEL_CACHE_PATH, '{}', 'utf8');
}
} catch {
// Silently fail
}
}
export function getCacheInfo(): { timestamp: number | null; providers: string[] } {
const cache = readCache();
if (!cache) {
return { timestamp: null, providers: [] };
}
return {
timestamp: cache.timestamp,
providers: Object.keys(cache.models || {})
};
}
export function getCachedModels(provider: string): string[] | null {
const cache = readCache();
if (!cache || !cache.models[provider]) {
return null;
}
return cache.models[provider];
}
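In practice a setup or model-listing flow would call fetchModelsForProvider and get either a freshly fetched list (cached for seven days in ~/.opencommit-models.json) or the hardcoded MODEL_LIST fallback. A hedged usage sketch; apiKey stands in for whatever key the user has configured:

import {
  clearModelCache,
  fetchModelsForProvider,
  getCachedModels
} from '../utils/modelCache';

async function demoModelListing(apiKey: string) {
  // first call hits the provider API (when a key is given) and writes the cache
  const models = await fetchModelsForProvider('openai', apiKey);

  // later calls within the 7-day TTL are served from the cache file
  const cached = getCachedModels('openai'); // string[] or null if never cached

  // wipe the cache and force a refresh
  clearModelCache();
  const fresh = await fetchModelsForProvider('openai', apiKey, undefined, true);

  return { models, cached, fresh };
}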

View File

@@ -4,20 +4,23 @@
* @param tag The tag name without angle brackets (e.g., 'think' for '<think></think>')
* @returns The content with the specified tags and their contents removed, and trimmed
*/
export function removeContentTags<T extends string | null | undefined>(content: T, tag: string): T {
export function removeContentTags<T extends string | null | undefined>(
content: T,
tag: string
): T {
if (!content || typeof content !== 'string') {
return content;
}
// Dynamic implementation for other cases
const openTag = `<${tag}>`;
const closeTag = `</${tag}>`;
// Parse the content and remove tags
let result = '';
let skipUntil: number | null = null;
let depth = 0;
for (let i = 0; i < content.length; i++) {
// Check for opening tag
if (content.substring(i, i + openTag.length) === openTag) {
@@ -29,7 +32,10 @@ export function removeContentTags<T extends string | null | undefined>(content:
}
}
// Check for closing tag
else if (content.substring(i, i + closeTag.length) === closeTag && depth > 0) {
else if (
content.substring(i, i + closeTag.length) === closeTag &&
depth > 0
) {
depth--;
if (depth === 0) {
i = i + closeTag.length - 1; // Skip the closing tag
@@ -37,7 +43,7 @@ export function removeContentTags<T extends string | null | undefined>(content:
continue;
}
}
// Only add character if not inside a tag
if (skipUntil === null) {
result += content[i];