mirror of
https://github.com/di-sukharev/opencommit.git
synced 2026-04-20 03:02:51 -04:00
Compare commits
70 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
695edf7ed9 | ||
|
|
61805175ed | ||
|
|
eaa60fdfb1 | ||
|
|
1bef4c5623 | ||
|
|
8f360a8c65 | ||
|
|
8ed6846910 | ||
|
|
5fde6dbb63 | ||
|
|
58b9d844b8 | ||
|
|
9855ed1f69 | ||
|
|
7e41139d9c | ||
|
|
66a8c2b52a | ||
|
|
57fb52a3c5 | ||
|
|
88964cbc5e | ||
|
|
cf27085ac9 | ||
|
|
7fa2384761 | ||
|
|
fa1482d8b1 | ||
|
|
f656c39f63 | ||
|
|
420a15343c | ||
|
|
fd9820dd64 | ||
|
|
2d9a26dc37 | ||
|
|
8cbaa36e82 | ||
|
|
42029fff4e | ||
|
|
4d767da9e5 | ||
|
|
361327a8fe | ||
|
|
3a2fa11fcd | ||
|
|
4056bfa547 | ||
|
|
a48d33096a | ||
|
|
d5dcd42d2c | ||
|
|
f300b5dd4e | ||
|
|
15884724e6 | ||
|
|
0b6fda1c2b | ||
|
|
a7fd0d8237 | ||
|
|
6cb67e5150 | ||
|
|
62129503b3 | ||
|
|
f81e836f34 | ||
|
|
c3d1fb379f | ||
|
|
e17294abc7 | ||
|
|
789b4f5e9f | ||
|
|
a9c9bcfd5a | ||
|
|
0ee82f7430 | ||
|
|
9923dab532 | ||
|
|
f74ba2dfc6 | ||
|
|
53414438d1 | ||
|
|
6982e76cf5 | ||
|
|
dc7f7f6552 | ||
|
|
db8a22b0cb | ||
|
|
e27007b6fe | ||
|
|
f51393e37a | ||
|
|
83f9193749 | ||
|
|
bc608e97bd | ||
|
|
40182f26b3 | ||
|
|
62d56a5278 | ||
|
|
9e601ca6b5 | ||
|
|
4a9b1391a3 | ||
|
|
3fe71c1d23 | ||
|
|
2f2e888098 | ||
|
|
4fc8284b87 | ||
|
|
689f52b22f | ||
|
|
de5d5cbb95 | ||
|
|
ccc227ed85 | ||
|
|
9ca7c02840 | ||
|
|
6d9fff56aa | ||
|
|
6ed70d0382 | ||
|
|
5b241ed2d0 | ||
|
|
8b0ee25923 | ||
|
|
fdd4d89bba | ||
|
|
d70797b864 | ||
|
|
74fff2861b | ||
|
|
a0dc1c87c5 | ||
|
|
d65547dcaa |
@@ -1,33 +0,0 @@
|
||||
{
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
"plugin:@typescript-eslint/recommended",
|
||||
"plugin:prettier/recommended"
|
||||
],
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": 12,
|
||||
"sourceType": "module"
|
||||
},
|
||||
"plugins": ["simple-import-sort", "import", "@typescript-eslint", "prettier"],
|
||||
"settings": {
|
||||
"import/resolver": {
|
||||
"node": {
|
||||
"extensions": [".js", ".jsx", ".ts", ".tsx"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"packageManager": "npm",
|
||||
"rules": {
|
||||
"prettier/prettier": "error",
|
||||
"no-console": "error",
|
||||
"import/order": "off",
|
||||
"sort-imports": "off",
|
||||
"simple-import-sort/imports": "error",
|
||||
"simple-import-sort/exports": "error",
|
||||
"import/first": "error",
|
||||
"import/newline-after-import": "error",
|
||||
"import/no-duplicates": "error",
|
||||
"@typescript-eslint/no-non-null-assertion": "off"
|
||||
}
|
||||
}
|
||||
6
.github/ISSUE_TEMPLATE/bug.yaml
vendored
6
.github/ISSUE_TEMPLATE/bug.yaml
vendored
@@ -1,7 +1,7 @@
|
||||
name: 🐞 Bug Report
|
||||
description: File a bug report
|
||||
title: "[Bug]: "
|
||||
labels: ["bug", "triage"]
|
||||
title: '[Bug]: '
|
||||
labels: ['bug', 'triage']
|
||||
assignees:
|
||||
- octocat
|
||||
body:
|
||||
@@ -48,7 +48,7 @@ body:
|
||||
label: What happened?
|
||||
description: Also tell us, what did you expect to happen?
|
||||
placeholder: Tell us what you see!
|
||||
value: "A bug happened!"
|
||||
value: 'A bug happened!'
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
|
||||
4
.github/ISSUE_TEMPLATE/featureRequest.yaml
vendored
4
.github/ISSUE_TEMPLATE/featureRequest.yaml
vendored
@@ -1,9 +1,9 @@
|
||||
---
|
||||
name: 🛠️ Feature Request
|
||||
description: Suggest an idea to help us improve Opencommit
|
||||
title: "[Feature]: "
|
||||
title: '[Feature]: '
|
||||
labels:
|
||||
- "feature_request"
|
||||
- 'feature_request'
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
|
||||
11
.github/workflows/codeql.yml
vendored
11
.github/workflows/codeql.yml
vendored
@@ -9,14 +9,14 @@
|
||||
# the `language` matrix defined below to confirm you have the correct set of
|
||||
# supported CodeQL languages.
|
||||
#
|
||||
name: "CodeQL"
|
||||
name: 'CodeQL'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "master" ]
|
||||
branches: ['master']
|
||||
pull_request:
|
||||
# The branches below must be a subset of the branches above
|
||||
branches: [ "master" ]
|
||||
branches: ['master']
|
||||
schedule:
|
||||
- cron: '21 16 * * 0'
|
||||
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
language: [ 'javascript' ]
|
||||
language: ['javascript']
|
||||
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
|
||||
# Use only 'java' to analyze code written in Java, Kotlin or both
|
||||
# Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
|
||||
@@ -54,7 +54,6 @@ jobs:
|
||||
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
|
||||
# queries: security-extended,security-and-quality
|
||||
|
||||
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
@@ -73,4 +72,4 @@ jobs:
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v3
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
category: '/language:${{matrix.language}}'
|
||||
|
||||
61
.github/workflows/test.yml
vendored
61
.github/workflows/test.yml
vendored
@@ -8,51 +8,33 @@ on:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
unit-test:
|
||||
linux-tests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
node-version: [20.x]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Use Node.js ${{ matrix.node-version }}
|
||||
uses: actions/setup-node@v3
|
||||
- name: Use Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
node-version: '20.x'
|
||||
cache: 'npm'
|
||||
- name: Install dependencies
|
||||
run: npm install
|
||||
- name: Run Unit Tests
|
||||
run: npm run test:unit
|
||||
e2e-test:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
node-version: [20.x]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Use Node.js ${{ matrix.node-version }}
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
cache: 'npm'
|
||||
- name: Install git
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y git
|
||||
git --version
|
||||
- name: Setup git
|
||||
run: |
|
||||
git config --global user.email "test@example.com"
|
||||
git config --global user.name "Test User"
|
||||
- name: Install dependencies
|
||||
run: npm install
|
||||
- name: Build
|
||||
run: npm run build
|
||||
- name: Run E2E Tests
|
||||
run: npm run test:e2e
|
||||
prettier:
|
||||
runs-on: ubuntu-latest
|
||||
run: npm ci
|
||||
- name: Run Lint
|
||||
run: npm run lint
|
||||
- name: Run Format Check
|
||||
run: npm run format:check
|
||||
- name: Run Unit Tests
|
||||
run: npm run test:unit
|
||||
- name: Run Core E2E Tests
|
||||
run: npm run test:e2e:core
|
||||
- name: Run Prompt Module E2E Tests
|
||||
run: npm run test:e2e:prompt-module
|
||||
macos-smoke:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Use Node.js
|
||||
@@ -62,10 +44,5 @@ jobs:
|
||||
cache: 'npm'
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
- name: Run Prettier
|
||||
run: npm run format:check
|
||||
- name: Prettier Output
|
||||
if: failure()
|
||||
run: |
|
||||
echo "Prettier check failed. Please run 'npm run format' to fix formatting issues."
|
||||
exit 1
|
||||
- name: Run Smoke E2E Tests
|
||||
run: npm run test:e2e:smoke
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
node_modules/
|
||||
out/
|
||||
coverage/
|
||||
temp/
|
||||
build/
|
||||
|
||||
38
README.md
38
README.md
@@ -201,6 +201,28 @@ or for as a cheaper option:
|
||||
oco config set OCO_MODEL=gpt-3.5-turbo
|
||||
```
|
||||
|
||||
### Model Management
|
||||
|
||||
OpenCommit automatically fetches available models from your provider when you run `oco setup`. Models are cached for 7 days to reduce API calls.
|
||||
|
||||
To see available models for your current provider:
|
||||
|
||||
```sh
|
||||
oco models
|
||||
```
|
||||
|
||||
To refresh the model list (e.g., after new models are released):
|
||||
|
||||
```sh
|
||||
oco models --refresh
|
||||
```
|
||||
|
||||
To see models for a specific provider:
|
||||
|
||||
```sh
|
||||
oco models --provider anthropic
|
||||
```
|
||||
|
||||
### Switch to other LLM providers with a custom URL
|
||||
|
||||
By default OpenCommit uses [OpenAI](https://openai.com).
|
||||
@@ -215,6 +237,22 @@ oco config set OCO_AI_PROVIDER=flowise OCO_API_KEY=<your_flowise_api_key> OCO_AP
|
||||
oco config set OCO_AI_PROVIDER=ollama OCO_API_KEY=<your_ollama_api_key> OCO_API_URL=<your_ollama_endpoint>
|
||||
```
|
||||
|
||||
### Use with Proxy
|
||||
|
||||
If you are behind a proxy, you can set it in the config:
|
||||
|
||||
```sh
|
||||
oco config set OCO_PROXY=http://127.0.0.1:7890
|
||||
```
|
||||
|
||||
If `OCO_PROXY` is unset, OpenCommit will automatically use `HTTPS_PROXY` or `HTTP_PROXY` environment variables.
|
||||
|
||||
To explicitly disable proxy use for OpenCommit, even when those environment variables are set:
|
||||
|
||||
```sh
|
||||
oco config set OCO_PROXY=null
|
||||
```
|
||||
|
||||
### Locale configuration
|
||||
|
||||
To globally specify the language used to generate commit messages:
|
||||
|
||||
57
biome.json
Normal file
57
biome.json
Normal file
@@ -0,0 +1,57 @@
|
||||
{
|
||||
"$schema": "https://biomejs.dev/schemas/2.4.11/schema.json",
|
||||
|
||||
"vcs": {
|
||||
"enabled": true,
|
||||
"clientKind": "git",
|
||||
"useIgnoreFile": true
|
||||
},
|
||||
|
||||
"files": {
|
||||
"ignoreUnknown": true,
|
||||
"includes": ["**", "!!build", "!!dist", "!!out"]
|
||||
},
|
||||
|
||||
"formatter": {
|
||||
"enabled": false,
|
||||
"indentStyle": "space",
|
||||
"indentWidth": 2,
|
||||
"lineEnding": "lf"
|
||||
},
|
||||
|
||||
"javascript": {
|
||||
"formatter": {
|
||||
"quoteStyle": "single",
|
||||
"jsxQuoteStyle": "double",
|
||||
"trailingCommas": "none",
|
||||
"semicolons": "always"
|
||||
}
|
||||
},
|
||||
|
||||
"linter": {
|
||||
"enabled": true,
|
||||
"rules": {
|
||||
"recommended": true,
|
||||
"suspicious": {
|
||||
"noConsole": "off",
|
||||
"noImplicitAnyLet": "off",
|
||||
"useIterableCallbackReturn": "off"
|
||||
},
|
||||
"correctness": {
|
||||
"noSwitchDeclarations": "off"
|
||||
},
|
||||
"style": {
|
||||
"noNonNullAssertion": "off"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"assist": {
|
||||
"enabled": false,
|
||||
"actions": {
|
||||
"source": {
|
||||
"organizeImports": "off"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -15,11 +15,9 @@ const config: Config = {
|
||||
testRegex: ['.*\\.test\\.ts$'],
|
||||
// Tell Jest to ignore the specific duplicate package.json files
|
||||
// that are causing Haste module naming collisions
|
||||
modulePathIgnorePatterns: [
|
||||
'<rootDir>/test/e2e/prompt-module/data/'
|
||||
],
|
||||
modulePathIgnorePatterns: ['<rootDir>/test/e2e/prompt-module/data/'],
|
||||
transformIgnorePatterns: [
|
||||
'node_modules/(?!(cli-testing-library|@clack|cleye)/.*)'
|
||||
'node_modules/(?!(cli-testing-library|@clack|cleye|chalk)/.*)'
|
||||
],
|
||||
transform: {
|
||||
'^.+\\.(ts|tsx|js|jsx|mjs)$': [
|
||||
|
||||
23029
out/cli.cjs
23029
out/cli.cjs
File diff suppressed because one or more lines are too long
88262
out/github-action.cjs
88262
out/github-action.cjs
File diff suppressed because one or more lines are too long
Binary file not shown.
1474
package-lock.json
generated
1474
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
29
package.json
29
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "opencommit",
|
||||
"version": "3.2.10",
|
||||
"version": "3.2.19",
|
||||
"description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
|
||||
"keywords": [
|
||||
"git",
|
||||
@@ -49,31 +49,37 @@
|
||||
"deploy": "npm publish --tag latest",
|
||||
"deploy:build": "npm run build:push && git push --tags && npm run deploy",
|
||||
"deploy:patch": "npm version patch && npm run deploy:build",
|
||||
"lint": "eslint src --ext ts && tsc --noEmit",
|
||||
"format": "prettier --write src",
|
||||
"format:check": "prettier --check src",
|
||||
"lint": "biome check . --diagnostic-level=error && tsc --noEmit",
|
||||
"typecheck": "tsc --noEmit",
|
||||
"format": "prettier --write \"src/**/*.{ts,js,json,md}\" \"test/**/*.{ts,js,json,md}\" \".github/**/*.{yml,yaml}\" \"*.{js,json,ts,md,yml,yaml}\"",
|
||||
"format:check": "prettier --check \"src/**/*.{ts,js,json,md}\" \"test/**/*.{ts,js,json,md}\" \".github/**/*.{yml,yaml}\" \"*.{js,json,ts,md,yml,yaml}\"",
|
||||
"test": "node --no-warnings --experimental-vm-modules $( [ -f ./node_modules/.bin/jest ] && echo ./node_modules/.bin/jest || which jest ) test/unit",
|
||||
"test:all": "npm run test:unit:docker && npm run test:e2e:docker",
|
||||
"test:docker-build": "docker build -t oco-test -f test/Dockerfile .",
|
||||
"test:unit": "NODE_OPTIONS=--experimental-vm-modules jest test/unit",
|
||||
"test:unit:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:unit",
|
||||
"test:e2e": "npm run test:e2e:setup && jest test/e2e",
|
||||
"test:e2e:setup": "sh test/e2e/setup.sh",
|
||||
"test:e2e": "npm run build && npm run test:e2e:smoke:run && npm run test:e2e:core:run && npm run test:e2e:prompt-module:run",
|
||||
"test:e2e:smoke": "npm run build && npm run test:e2e:smoke:run",
|
||||
"test:e2e:smoke:run": "OCO_TEST_SKIP_VERSION_CHECK=true jest test/e2e/smoke.test.ts",
|
||||
"test:e2e:core": "npm run build && npm run test:e2e:core:run",
|
||||
"test:e2e:core:run": "OCO_TEST_SKIP_VERSION_CHECK=true jest test/e2e/cliBehavior.test.ts test/e2e/geminiBehavior.test.ts test/e2e/gitPush.test.ts test/e2e/oneFile.test.ts test/e2e/noChanges.test.ts",
|
||||
"test:e2e:setup": "npm run test:e2e:prompt-module:setup",
|
||||
"test:e2e:prompt-module:setup": "sh test/e2e/setup.sh",
|
||||
"test:e2e:prompt-module": "npm run build && npm run test:e2e:prompt-module:run",
|
||||
"test:e2e:prompt-module:run": "npm run test:e2e:prompt-module:setup && OCO_TEST_SKIP_VERSION_CHECK=true jest test/e2e/prompt-module",
|
||||
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e",
|
||||
"mlx:start": "OCO_AI_PROVIDER='mlx' node ./out/cli.cjs"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.4.11",
|
||||
"@commitlint/types": "^17.4.4",
|
||||
"@types/ini": "^1.3.31",
|
||||
"@types/inquirer": "^9.0.3",
|
||||
"@types/jest": "^29.5.12",
|
||||
"@types/node": "^16.18.14",
|
||||
"@typescript-eslint/eslint-plugin": "^8.29.0",
|
||||
"@typescript-eslint/parser": "^8.29.0",
|
||||
"cli-testing-library": "^2.0.2",
|
||||
"dotenv": "^16.0.3",
|
||||
"esbuild": "^0.25.5",
|
||||
"eslint": "^9.24.0",
|
||||
"jest": "^29.7.0",
|
||||
"prettier": "^2.8.4",
|
||||
"rimraf": "^6.0.1",
|
||||
@@ -89,15 +95,16 @@
|
||||
"@azure/openai": "^1.0.0-beta.12",
|
||||
"@clack/prompts": "^0.6.1",
|
||||
"@dqbd/tiktoken": "^1.0.2",
|
||||
"@google/generative-ai": "^0.11.4",
|
||||
"@google/generative-ai": "^0.24.1",
|
||||
"@mistralai/mistralai": "^1.3.5",
|
||||
"@octokit/webhooks-schemas": "^6.11.0",
|
||||
"@octokit/webhooks-types": "^6.11.0",
|
||||
"axios": "^1.3.4",
|
||||
"axios": "1.9.0",
|
||||
"chalk": "^5.2.0",
|
||||
"cleye": "^1.3.2",
|
||||
"crypto": "^1.0.1",
|
||||
"execa": "^7.0.0",
|
||||
"https-proxy-agent": "^8.0.0",
|
||||
"ignore": "^5.2.4",
|
||||
"ini": "^3.0.1",
|
||||
"inquirer": "^9.1.4",
|
||||
|
||||
75
src/cli.ts
75
src/cli.ts
@@ -5,19 +5,63 @@ import { cli } from 'cleye';
|
||||
import packageJSON from '../package.json';
|
||||
import { commit } from './commands/commit';
|
||||
import { commitlintConfigCommand } from './commands/commitlint';
|
||||
import { configCommand } from './commands/config';
|
||||
import { configCommand, getConfig } from './commands/config';
|
||||
import { hookCommand, isHookCalled } from './commands/githook.js';
|
||||
import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
|
||||
import { resolveProxy, setupProxy } from './utils/proxy';
|
||||
import {
|
||||
setupCommand,
|
||||
isFirstRun,
|
||||
runSetup,
|
||||
promptForMissingApiKey
|
||||
} from './commands/setup';
|
||||
import { modelsCommand } from './commands/models';
|
||||
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
|
||||
import { runMigrations } from './migrations/_run.js';
|
||||
|
||||
const extraArgs = process.argv.slice(2);
|
||||
const config = getConfig();
|
||||
setupProxy(resolveProxy(config.OCO_PROXY));
|
||||
|
||||
const OCO_FLAGS_WITH_VALUE = new Set(['-c', '--context']);
|
||||
const OCO_BOOLEAN_FLAGS = new Set(['-y', '--yes', '--fgm']);
|
||||
const OCO_EQUALS_PREFIXES = ['-c=', '--context=', '-y=', '--yes=', '--fgm='];
|
||||
|
||||
const stripOcoFlags = (argv: string[]): string[] => {
|
||||
const out: string[] = [];
|
||||
for (let i = 0; i < argv.length; i++) {
|
||||
const a = argv[i];
|
||||
// String flags with a separate value token: -c <val>, --context <val>
|
||||
if (OCO_FLAGS_WITH_VALUE.has(a)) {
|
||||
i++; // skip the value token too
|
||||
continue;
|
||||
}
|
||||
// Boolean flags: -y, --yes, --fgm
|
||||
if (OCO_BOOLEAN_FLAGS.has(a)) {
|
||||
continue;
|
||||
}
|
||||
// Equals form: -c=…, --context=…, -y=…, --yes=…, --fgm=…
|
||||
if (OCO_EQUALS_PREFIXES.some((prefix) => a.startsWith(prefix))) {
|
||||
continue;
|
||||
}
|
||||
out.push(a);
|
||||
}
|
||||
return out;
|
||||
};
|
||||
|
||||
const rawArgv = process.argv.slice(2);
|
||||
const extraArgs = stripOcoFlags(rawArgv);
|
||||
|
||||
cli(
|
||||
{
|
||||
version: packageJSON.version,
|
||||
name: 'opencommit',
|
||||
commands: [configCommand, hookCommand, commitlintConfigCommand],
|
||||
commands: [
|
||||
configCommand,
|
||||
hookCommand,
|
||||
commitlintConfigCommand,
|
||||
setupCommand,
|
||||
modelsCommand
|
||||
],
|
||||
flags: {
|
||||
fgm: {
|
||||
type: Boolean,
|
||||
@@ -41,14 +85,29 @@ cli(
|
||||
help: { description: packageJSON.description }
|
||||
},
|
||||
async ({ flags }) => {
|
||||
if (await isHookCalled()) {
|
||||
await prepareCommitMessageHook();
|
||||
return;
|
||||
}
|
||||
|
||||
await runMigrations();
|
||||
await checkIsLatestVersion();
|
||||
|
||||
if (await isHookCalled()) {
|
||||
prepareCommitMessageHook();
|
||||
} else {
|
||||
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
|
||||
// Check for first run and trigger setup wizard
|
||||
if (isFirstRun()) {
|
||||
const setupComplete = await runSetup();
|
||||
if (!setupComplete) {
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Check for missing API key and prompt if needed
|
||||
const hasApiKey = await promptForMissingApiKey();
|
||||
if (!hasApiKey) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
|
||||
},
|
||||
extraArgs
|
||||
rawArgv
|
||||
);
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
export enum COMMANDS {
|
||||
config = 'config',
|
||||
hook = 'hook',
|
||||
commitlint = 'commitlint'
|
||||
commitlint = 'commitlint',
|
||||
setup = 'setup',
|
||||
models = 'models'
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
import chalk from 'chalk';
|
||||
import { execa } from 'execa';
|
||||
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
|
||||
import { formatUserFriendlyError, printFormattedError } from '../utils/errors';
|
||||
import {
|
||||
assertGitRepo,
|
||||
getChangedFiles,
|
||||
@@ -28,6 +29,32 @@ const getGitRemotes = async () => {
|
||||
return stdout.split('\n').filter((remote) => Boolean(remote.trim()));
|
||||
};
|
||||
|
||||
const hasUpstreamBranch = async (): Promise<boolean> => {
|
||||
try {
|
||||
await execa('git', [
|
||||
'rev-parse',
|
||||
'--abbrev-ref',
|
||||
'--symbolic-full-name',
|
||||
'@{u}'
|
||||
]);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
const getCurrentBranch = async (): Promise<string> => {
|
||||
const { stdout } = await execa('git', ['branch', '--show-current']);
|
||||
return stdout.trim();
|
||||
};
|
||||
|
||||
const displayPushUrl = (stderr: string) => {
|
||||
const urlMatch = stderr.match(/https?:\/\/\S+/);
|
||||
if (urlMatch) {
|
||||
outro(`${chalk.cyan('Create a pull request:')} ${urlMatch[0]}`);
|
||||
}
|
||||
};
|
||||
|
||||
// Check for the presence of message templates
|
||||
const checkMessageTemplate = (extraArgs: string[]): string | false => {
|
||||
for (const key in extraArgs) {
|
||||
@@ -129,8 +156,13 @@ ${chalk.grey('——————————————————')}`
|
||||
if (config.OCO_GITPUSH === false) return;
|
||||
|
||||
if (!remotes.length) {
|
||||
const { stdout } = await execa('git', ['push']);
|
||||
const pushArgs = ['push'];
|
||||
if (!(await hasUpstreamBranch())) {
|
||||
pushArgs.push('--set-upstream', 'origin', await getCurrentBranch());
|
||||
}
|
||||
const { stdout, stderr } = await execa('git', pushArgs);
|
||||
if (stdout) outro(stdout);
|
||||
displayPushUrl(stderr);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
@@ -146,11 +178,11 @@ ${chalk.grey('——————————————————')}`
|
||||
|
||||
pushSpinner.start(`Running 'git push ${remotes[0]}'`);
|
||||
|
||||
const { stdout } = await execa('git', [
|
||||
'push',
|
||||
'--verbose',
|
||||
remotes[0]
|
||||
]);
|
||||
const pushArgs = ['push', '--verbose', remotes[0]];
|
||||
if (!(await hasUpstreamBranch())) {
|
||||
pushArgs.push('--set-upstream', await getCurrentBranch());
|
||||
}
|
||||
const { stdout, stderr } = await execa('git', pushArgs);
|
||||
|
||||
pushSpinner.stop(
|
||||
`${chalk.green('✔')} Successfully pushed all commits to ${
|
||||
@@ -159,6 +191,7 @@ ${chalk.grey('——————————————————')}`
|
||||
);
|
||||
|
||||
if (stdout) outro(stdout);
|
||||
displayPushUrl(stderr);
|
||||
} else {
|
||||
outro('`git push` aborted');
|
||||
process.exit(0);
|
||||
@@ -180,7 +213,11 @@ ${chalk.grey('——————————————————')}`
|
||||
|
||||
pushSpinner.start(`Running 'git push ${selectedRemote}'`);
|
||||
|
||||
const { stdout } = await execa('git', ['push', selectedRemote]);
|
||||
const pushArgs = ['push', selectedRemote];
|
||||
if (!(await hasUpstreamBranch())) {
|
||||
pushArgs.push('--set-upstream', await getCurrentBranch());
|
||||
}
|
||||
const { stdout, stderr } = await execa('git', pushArgs);
|
||||
|
||||
if (stdout) outro(stdout);
|
||||
|
||||
@@ -189,6 +226,8 @@ ${chalk.grey('——————————————————')}`
|
||||
'✔'
|
||||
)} successfully pushed all commits to ${selectedRemote}`
|
||||
);
|
||||
|
||||
displayPushUrl(stderr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -202,7 +241,9 @@ ${chalk.grey('——————————————————')}`
|
||||
await generateCommitMessageFromGitDiff({
|
||||
diff,
|
||||
extraArgs,
|
||||
fullGitMojiSpec
|
||||
context,
|
||||
fullGitMojiSpec,
|
||||
skipCommitConfirmation
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -211,10 +252,13 @@ ${chalk.grey('——————————————————')}`
|
||||
`${chalk.red('✖')} Failed to generate the commit message`
|
||||
);
|
||||
|
||||
console.log(error);
|
||||
const errorConfig = getConfig();
|
||||
const provider = errorConfig.OCO_AI_PROVIDER || 'openai';
|
||||
const formatted = formatUserFriendlyError(error, provider, {
|
||||
baseURL: errorConfig.OCO_API_URL
|
||||
});
|
||||
outro(printFormattedError(formatted));
|
||||
|
||||
const err = error as Error;
|
||||
outro(`${chalk.red('✖')} ${err?.message || err}`);
|
||||
process.exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -25,10 +25,12 @@ export enum CONFIG_KEYS {
|
||||
OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT',
|
||||
OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE',
|
||||
OCO_API_URL = 'OCO_API_URL',
|
||||
OCO_PROXY = 'OCO_PROXY',
|
||||
OCO_API_CUSTOM_HEADERS = 'OCO_API_CUSTOM_HEADERS',
|
||||
OCO_OMIT_SCOPE = 'OCO_OMIT_SCOPE',
|
||||
OCO_GITPUSH = 'OCO_GITPUSH', // todo: deprecate
|
||||
OCO_HOOK_AUTO_UNCOMMENT = 'OCO_HOOK_AUTO_UNCOMMENT'
|
||||
OCO_HOOK_AUTO_UNCOMMENT = 'OCO_HOOK_AUTO_UNCOMMENT',
|
||||
OCO_OLLAMA_THINK = 'OCO_OLLAMA_THINK'
|
||||
}
|
||||
|
||||
export enum CONFIG_MODES {
|
||||
@@ -68,10 +70,11 @@ export const MODEL_LIST = {
|
||||
],
|
||||
|
||||
anthropic: [
|
||||
'claude-3-5-sonnet-20240620',
|
||||
'claude-3-opus-20240229',
|
||||
'claude-3-sonnet-20240229',
|
||||
'claude-3-haiku-20240307'
|
||||
'claude-sonnet-4-20250514',
|
||||
'claude-opus-4-20250514',
|
||||
'claude-3-7-sonnet-20250219',
|
||||
'claude-3-5-sonnet-20241022',
|
||||
'claude-3-5-haiku-20241022'
|
||||
],
|
||||
|
||||
gemini: [
|
||||
@@ -720,7 +723,17 @@ export const configValidators = {
|
||||
[CONFIG_KEYS.OCO_API_URL](value: any) {
|
||||
validateConfig(
|
||||
CONFIG_KEYS.OCO_API_URL,
|
||||
typeof value === 'string',
|
||||
typeof value === 'string' && /^(https?:\/\/)/.test(value),
|
||||
`${value} is not a valid URL. It should start with 'http://' or 'https://'.`
|
||||
);
|
||||
return value;
|
||||
},
|
||||
|
||||
[CONFIG_KEYS.OCO_PROXY](value: any) {
|
||||
validateConfig(
|
||||
CONFIG_KEYS.OCO_PROXY,
|
||||
value === null ||
|
||||
(typeof value === 'string' && /^(https?:\/\/)/.test(value)),
|
||||
`${value} is not a valid URL. It should start with 'http://' or 'https://'.`
|
||||
);
|
||||
return value;
|
||||
@@ -827,6 +840,15 @@ export const configValidators = {
|
||||
typeof value === 'boolean',
|
||||
'Must be true or false'
|
||||
);
|
||||
return value;
|
||||
},
|
||||
|
||||
[CONFIG_KEYS.OCO_OLLAMA_THINK](value: any) {
|
||||
validateConfig(
|
||||
CONFIG_KEYS.OCO_OLLAMA_THINK,
|
||||
typeof value === 'boolean',
|
||||
'Must be true or false'
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -846,11 +868,40 @@ export enum OCO_AI_PROVIDER_ENUM {
|
||||
OPENROUTER = 'openrouter'
|
||||
}
|
||||
|
||||
export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
|
||||
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/api-keys',
|
||||
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]:
|
||||
'https://console.anthropic.com/settings/keys',
|
||||
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/apikey',
|
||||
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/keys',
|
||||
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/api-keys/',
|
||||
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/api_keys',
|
||||
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/keys',
|
||||
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/keys',
|
||||
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/',
|
||||
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
|
||||
[OCO_AI_PROVIDER_ENUM.MLX]: null,
|
||||
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
|
||||
[OCO_AI_PROVIDER_ENUM.TEST]: null
|
||||
};
|
||||
|
||||
export const RECOMMENDED_MODELS: Record<string, string> = {
|
||||
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'gpt-4o-mini',
|
||||
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'claude-sonnet-4-20250514',
|
||||
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'gemini-1.5-flash',
|
||||
[OCO_AI_PROVIDER_ENUM.GROQ]: 'llama3-70b-8192',
|
||||
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'mistral-small-latest',
|
||||
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'deepseek-chat',
|
||||
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'openai/gpt-4o-mini',
|
||||
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini'
|
||||
};
|
||||
|
||||
export type ConfigType = {
|
||||
[CONFIG_KEYS.OCO_API_KEY]?: string;
|
||||
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
|
||||
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT]: number;
|
||||
[CONFIG_KEYS.OCO_API_URL]?: string;
|
||||
[CONFIG_KEYS.OCO_PROXY]?: string | null;
|
||||
[CONFIG_KEYS.OCO_API_CUSTOM_HEADERS]?: string;
|
||||
[CONFIG_KEYS.OCO_DESCRIPTION]: boolean;
|
||||
[CONFIG_KEYS.OCO_EMOJI]: boolean;
|
||||
@@ -865,6 +916,7 @@ export type ConfigType = {
|
||||
[CONFIG_KEYS.OCO_OMIT_SCOPE]: boolean;
|
||||
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE]: string;
|
||||
[CONFIG_KEYS.OCO_HOOK_AUTO_UNCOMMENT]: boolean;
|
||||
[CONFIG_KEYS.OCO_OLLAMA_THINK]?: boolean;
|
||||
};
|
||||
|
||||
export const defaultConfigPath = pathJoin(homedir(), '.opencommit');
|
||||
@@ -935,6 +987,7 @@ const getEnvConfig = (envPath: string) => {
|
||||
return {
|
||||
OCO_MODEL: process.env.OCO_MODEL,
|
||||
OCO_API_URL: process.env.OCO_API_URL,
|
||||
OCO_PROXY: process.env.OCO_PROXY,
|
||||
OCO_API_KEY: process.env.OCO_API_KEY,
|
||||
OCO_API_CUSTOM_HEADERS: process.env.OCO_API_CUSTOM_HEADERS,
|
||||
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM,
|
||||
@@ -972,16 +1025,13 @@ export const getIsGlobalConfigFileExist = (
|
||||
};
|
||||
|
||||
export const getGlobalConfig = (configPath: string = defaultConfigPath) => {
|
||||
let globalConfig: ConfigType;
|
||||
|
||||
const isGlobalConfigFileExist = getIsGlobalConfigFileExist(configPath);
|
||||
if (!isGlobalConfigFileExist) globalConfig = initGlobalConfig(configPath);
|
||||
else {
|
||||
const configFile = readFileSync(configPath, 'utf8');
|
||||
globalConfig = iniParse(configFile) as ConfigType;
|
||||
if (!isGlobalConfigFileExist) {
|
||||
return { ...DEFAULT_CONFIG };
|
||||
}
|
||||
|
||||
return globalConfig;
|
||||
const configFile = readFileSync(configPath, 'utf8');
|
||||
return iniParse(configFile) as ConfigType;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -994,7 +1044,10 @@ export const getGlobalConfig = (configPath: string = defaultConfigPath) => {
|
||||
const mergeConfigs = (main: Partial<ConfigType>, fallback: ConfigType) => {
|
||||
const allKeys = new Set([...Object.keys(main), ...Object.keys(fallback)]);
|
||||
return Array.from(allKeys).reduce((acc, key) => {
|
||||
acc[key] = parseConfigVarValue(main[key] ?? fallback[key]);
|
||||
const mainValue = main[key];
|
||||
acc[key] = parseConfigVarValue(
|
||||
mainValue !== undefined ? mainValue : fallback[key]
|
||||
);
|
||||
return acc;
|
||||
}, {} as ConfigType);
|
||||
};
|
||||
@@ -1160,6 +1213,14 @@ function getConfigKeyDetails(key) {
|
||||
'Custom API URL - may be used to set proxy path to OpenAI API',
|
||||
values: ["URL string (must start with 'http://' or 'https://')"]
|
||||
};
|
||||
case CONFIG_KEYS.OCO_PROXY:
|
||||
return {
|
||||
description: 'HTTP/HTTPS Proxy URL',
|
||||
values: [
|
||||
"URL string (must start with 'http://' or 'https://')",
|
||||
'null (disable proxy even when HTTP_PROXY/HTTPS_PROXY are set)'
|
||||
]
|
||||
};
|
||||
case CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
|
||||
return {
|
||||
description: 'Message template placeholder',
|
||||
|
||||
155
src/commands/models.ts
Normal file
155
src/commands/models.ts
Normal file
@@ -0,0 +1,155 @@
|
||||
import { intro, outro, spinner } from '@clack/prompts';
|
||||
import chalk from 'chalk';
|
||||
import { command } from 'cleye';
|
||||
import { COMMANDS } from './ENUMS';
|
||||
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM, getConfig } from './config';
|
||||
import {
|
||||
fetchModelsForProvider,
|
||||
clearModelCache,
|
||||
getCacheInfo,
|
||||
getCachedModels
|
||||
} from '../utils/modelCache';
|
||||
|
||||
function formatCacheAge(timestamp: number | null): string {
|
||||
if (!timestamp) return 'never';
|
||||
const ageMs = Date.now() - timestamp;
|
||||
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
|
||||
const hours = Math.floor(ageMs / (1000 * 60 * 60));
|
||||
const minutes = Math.floor(ageMs / (1000 * 60));
|
||||
|
||||
if (days > 0) {
|
||||
return `${days} day${days === 1 ? '' : 's'} ago`;
|
||||
} else if (hours > 0) {
|
||||
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
|
||||
} else if (minutes > 0) {
|
||||
return `${minutes} minute${minutes === 1 ? '' : 's'} ago`;
|
||||
}
|
||||
return 'just now';
|
||||
}
|
||||
|
||||
async function listModels(
|
||||
provider: string,
|
||||
useCache: boolean = true
|
||||
): Promise<void> {
|
||||
const config = getConfig();
|
||||
const apiKey = config.OCO_API_KEY;
|
||||
const currentModel = config.OCO_MODEL;
|
||||
|
||||
// Get cached models or fetch new ones
|
||||
let models: string[] = [];
|
||||
|
||||
if (useCache) {
|
||||
const cached = getCachedModels(provider);
|
||||
if (cached) {
|
||||
models = cached;
|
||||
}
|
||||
}
|
||||
|
||||
if (models.length === 0) {
|
||||
// Fallback to hardcoded list
|
||||
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
|
||||
models = MODEL_LIST[providerKey] || [];
|
||||
}
|
||||
|
||||
console.log(
|
||||
`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`
|
||||
);
|
||||
|
||||
if (models.length === 0) {
|
||||
console.log(chalk.dim(' No models found'));
|
||||
} else {
|
||||
models.forEach((model) => {
|
||||
const isCurrent = model === currentModel;
|
||||
const prefix = isCurrent ? chalk.green('* ') : ' ';
|
||||
const label = isCurrent ? chalk.green(model) : model;
|
||||
console.log(`${prefix}${label}`);
|
||||
});
|
||||
}
|
||||
|
||||
console.log('');
|
||||
}
|
||||
|
||||
async function refreshModels(provider: string): Promise<void> {
|
||||
const config = getConfig();
|
||||
const apiKey = config.OCO_API_KEY;
|
||||
|
||||
const loadingSpinner = spinner();
|
||||
loadingSpinner.start(`Fetching models from ${provider}...`);
|
||||
|
||||
// Clear cache first
|
||||
clearModelCache();
|
||||
|
||||
try {
|
||||
const models = await fetchModelsForProvider(
|
||||
provider,
|
||||
apiKey,
|
||||
undefined,
|
||||
true
|
||||
);
|
||||
loadingSpinner.stop(`${chalk.green('+')} Fetched ${models.length} models`);
|
||||
|
||||
// List the models
|
||||
await listModels(provider, true);
|
||||
} catch (error) {
|
||||
loadingSpinner.stop(chalk.red('Failed to fetch models'));
|
||||
console.error(
|
||||
chalk.red(
|
||||
`Error: ${error instanceof Error ? error.message : 'Unknown error'}`
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export const modelsCommand = command(
|
||||
{
|
||||
name: COMMANDS.models,
|
||||
help: {
|
||||
description: 'List and manage cached models for your AI provider'
|
||||
},
|
||||
flags: {
|
||||
refresh: {
|
||||
type: Boolean,
|
||||
alias: 'r',
|
||||
description: 'Clear cache and re-fetch models from the provider',
|
||||
default: false
|
||||
},
|
||||
provider: {
|
||||
type: String,
|
||||
alias: 'p',
|
||||
description: 'Specify provider (defaults to current OCO_AI_PROVIDER)'
|
||||
}
|
||||
}
|
||||
},
|
||||
async ({ flags }) => {
|
||||
const config = getConfig();
|
||||
const provider =
|
||||
flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
|
||||
|
||||
intro(chalk.bgCyan(' OpenCommit Models '));
|
||||
|
||||
// Show cache info
|
||||
const cacheInfo = getCacheInfo();
|
||||
if (cacheInfo.timestamp) {
|
||||
console.log(
|
||||
chalk.dim(
|
||||
` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`
|
||||
)
|
||||
);
|
||||
if (cacheInfo.providers.length > 0) {
|
||||
console.log(
|
||||
chalk.dim(` Cached providers: ${cacheInfo.providers.join(', ')}`)
|
||||
);
|
||||
}
|
||||
} else {
|
||||
console.log(chalk.dim(' No cached models'));
|
||||
}
|
||||
|
||||
if (flags.refresh) {
|
||||
await refreshModels(provider);
|
||||
} else {
|
||||
await listModels(provider);
|
||||
}
|
||||
|
||||
outro(`Run ${chalk.cyan('oco models --refresh')} to update the model list`);
|
||||
}
|
||||
);
|
||||
@@ -49,9 +49,16 @@ export const prepareCommitMessageHook = async (
|
||||
const spin = spinner();
|
||||
spin.start('Generating commit message');
|
||||
|
||||
const commitMessage = await generateCommitMessageByDiff(
|
||||
let commitMessage: string;
|
||||
try {
|
||||
commitMessage = await generateCommitMessageByDiff(
|
||||
await getDiff({ files: staged })
|
||||
);
|
||||
} catch (error) {
|
||||
spin.stop('Done');
|
||||
throw error;
|
||||
}
|
||||
|
||||
spin.stop('Done');
|
||||
|
||||
const fileContent = await fs.readFile(messageFilePath);
|
||||
@@ -63,9 +70,19 @@ export const prepareCommitMessageHook = async (
|
||||
? messageWithoutComment
|
||||
: messageWithComment;
|
||||
|
||||
await fs.writeFile(messageFilePath, message);
|
||||
} catch (error) {
|
||||
try {
|
||||
outro(`${chalk.red('✖')} ${error}`);
|
||||
const fileContent = await fs.readFile(messageFilePath);
|
||||
|
||||
const commentedError = String(error).replace(new RegExp('^', 'gm'), '# ');
|
||||
const message = `\n\n# ---------- [OpenCommit] ---------- #\n# Failed to generate the commit message.\n# To cancel the commit, just close this window without making any changes.\n\n${commentedError}\n\n${fileContent.toString()}`
|
||||
|
||||
await fs.writeFile(messageFilePath, message);
|
||||
} catch (error) {
|
||||
outro(`${chalk.red('✖')} ${error}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
490
src/commands/setup.ts
Normal file
490
src/commands/setup.ts
Normal file
@@ -0,0 +1,490 @@
|
||||
import { intro, outro, select, text, isCancel, spinner } from '@clack/prompts';
|
||||
import chalk from 'chalk';
|
||||
import { command } from 'cleye';
|
||||
import { COMMANDS } from './ENUMS';
|
||||
import {
|
||||
CONFIG_KEYS,
|
||||
MODEL_LIST,
|
||||
OCO_AI_PROVIDER_ENUM,
|
||||
getConfig,
|
||||
setGlobalConfig,
|
||||
getGlobalConfig,
|
||||
getIsGlobalConfigFileExist,
|
||||
DEFAULT_CONFIG,
|
||||
PROVIDER_API_KEY_URLS,
|
||||
RECOMMENDED_MODELS
|
||||
} from './config';
|
||||
import {
|
||||
fetchModelsForProvider,
|
||||
fetchOllamaModels,
|
||||
getCacheInfo
|
||||
} from '../utils/modelCache';
|
||||
|
||||
const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
|
||||
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'OpenAI (GPT-4o, GPT-4)',
|
||||
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'Anthropic (Claude Sonnet, Opus)',
|
||||
[OCO_AI_PROVIDER_ENUM.OLLAMA]: 'Ollama (Free, runs locally)',
|
||||
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'Google Gemini',
|
||||
[OCO_AI_PROVIDER_ENUM.GROQ]: 'Groq (Fast inference, free tier)',
|
||||
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'Mistral AI',
|
||||
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'DeepSeek',
|
||||
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'OpenRouter (Multiple providers)',
|
||||
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'AI/ML API',
|
||||
[OCO_AI_PROVIDER_ENUM.AZURE]: 'Azure OpenAI',
|
||||
[OCO_AI_PROVIDER_ENUM.MLX]: 'MLX (Apple Silicon, local)'
|
||||
};
|
||||
|
||||
const PRIMARY_PROVIDERS = [
|
||||
OCO_AI_PROVIDER_ENUM.OPENAI,
|
||||
OCO_AI_PROVIDER_ENUM.ANTHROPIC,
|
||||
OCO_AI_PROVIDER_ENUM.OLLAMA
|
||||
];
|
||||
|
||||
const OTHER_PROVIDERS = [
|
||||
OCO_AI_PROVIDER_ENUM.GEMINI,
|
||||
OCO_AI_PROVIDER_ENUM.GROQ,
|
||||
OCO_AI_PROVIDER_ENUM.MISTRAL,
|
||||
OCO_AI_PROVIDER_ENUM.DEEPSEEK,
|
||||
OCO_AI_PROVIDER_ENUM.OPENROUTER,
|
||||
OCO_AI_PROVIDER_ENUM.AIMLAPI,
|
||||
OCO_AI_PROVIDER_ENUM.AZURE,
|
||||
OCO_AI_PROVIDER_ENUM.MLX
|
||||
];
|
||||
|
||||
const NO_API_KEY_PROVIDERS = [
|
||||
OCO_AI_PROVIDER_ENUM.OLLAMA,
|
||||
OCO_AI_PROVIDER_ENUM.MLX,
|
||||
OCO_AI_PROVIDER_ENUM.TEST
|
||||
];
|
||||
|
||||
const MODEL_REQUIRED_PROVIDERS = [
|
||||
OCO_AI_PROVIDER_ENUM.OLLAMA,
|
||||
OCO_AI_PROVIDER_ENUM.MLX
|
||||
];
|
||||
|
||||
async function selectProvider(): Promise<string | symbol> {
|
||||
const primaryOptions = PRIMARY_PROVIDERS.map((provider) => ({
|
||||
value: provider,
|
||||
label: PROVIDER_DISPLAY_NAMES[provider] || provider
|
||||
}));
|
||||
|
||||
primaryOptions.push({
|
||||
value: 'other',
|
||||
label: 'Other providers...'
|
||||
});
|
||||
|
||||
const selection = await select({
|
||||
message: 'Select your AI provider:',
|
||||
options: primaryOptions
|
||||
});
|
||||
|
||||
if (isCancel(selection)) return selection;
|
||||
|
||||
if (selection === 'other') {
|
||||
const otherOptions = OTHER_PROVIDERS.map((provider) => ({
|
||||
value: provider,
|
||||
label: PROVIDER_DISPLAY_NAMES[provider] || provider
|
||||
}));
|
||||
|
||||
return await select({
|
||||
message: 'Select provider:',
|
||||
options: otherOptions
|
||||
});
|
||||
}
|
||||
|
||||
return selection;
|
||||
}
|
||||
|
||||
async function getApiKey(provider: string): Promise<string | symbol> {
|
||||
const url =
|
||||
PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];
|
||||
|
||||
let message = `Enter your ${provider} API key:`;
|
||||
if (url) {
|
||||
message = `Enter your API key:\n${chalk.dim(` Get your key at: ${url}`)}`;
|
||||
}
|
||||
|
||||
return await text({
|
||||
message,
|
||||
placeholder: 'sk-...',
|
||||
validate: (value) => {
|
||||
if (!value || value.trim().length === 0) {
|
||||
return 'API key is required';
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function formatCacheAge(timestamp: number | null): string {
|
||||
if (!timestamp) return '';
|
||||
const ageMs = Date.now() - timestamp;
|
||||
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
|
||||
const hours = Math.floor(ageMs / (1000 * 60 * 60));
|
||||
|
||||
if (days > 0) {
|
||||
return `${days} day${days === 1 ? '' : 's'} ago`;
|
||||
} else if (hours > 0) {
|
||||
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
|
||||
}
|
||||
return 'just now';
|
||||
}
|
||||
|
||||
async function selectModel(
|
||||
provider: string,
|
||||
apiKey?: string
|
||||
): Promise<string | symbol> {
|
||||
const providerDisplayName =
|
||||
PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
|
||||
const loadingSpinner = spinner();
|
||||
loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
|
||||
|
||||
let models: string[] = [];
|
||||
let usedFallback = false;
|
||||
|
||||
try {
|
||||
models = await fetchModelsForProvider(provider, apiKey);
|
||||
} catch {
|
||||
// Fall back to hardcoded list
|
||||
usedFallback = true;
|
||||
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
|
||||
models = MODEL_LIST[providerKey] || [];
|
||||
}
|
||||
|
||||
// Check cache info for display
|
||||
const cacheInfo = getCacheInfo();
|
||||
const cacheAge = formatCacheAge(cacheInfo.timestamp);
|
||||
|
||||
if (usedFallback) {
|
||||
loadingSpinner.stop(
|
||||
chalk.yellow('Could not fetch models from API. Using default list.')
|
||||
);
|
||||
} else if (cacheAge) {
|
||||
loadingSpinner.stop(`Models loaded ${chalk.dim(`(cached ${cacheAge})`)}`);
|
||||
} else {
|
||||
loadingSpinner.stop('Models loaded');
|
||||
}
|
||||
|
||||
if (models.length === 0) {
|
||||
// For Ollama/MLX, prompt for manual entry
|
||||
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
|
||||
return await text({
|
||||
message: 'Enter model name (e.g., llama3:8b, mistral):',
|
||||
placeholder: 'llama3:8b',
|
||||
validate: (value) => {
|
||||
if (!value || value.trim().length === 0) {
|
||||
return 'Model name is required';
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Use default from config
|
||||
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
|
||||
return MODEL_LIST[providerKey]?.[0] || 'gpt-4o-mini';
|
||||
}
|
||||
|
||||
// Get recommended model for this provider
|
||||
const recommended =
|
||||
RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
|
||||
|
||||
// Build options with recommended first
|
||||
const options: Array<{ value: string; label: string }> = [];
|
||||
|
||||
if (recommended && models.includes(recommended)) {
|
||||
options.push({
|
||||
value: recommended,
|
||||
label: `${recommended} (Recommended)`
|
||||
});
|
||||
}
|
||||
|
||||
// Add other models (first 10, excluding recommended)
|
||||
const otherModels = models.filter((m) => m !== recommended).slice(0, 10);
|
||||
|
||||
otherModels.forEach((model) => {
|
||||
options.push({ value: model, label: model });
|
||||
});
|
||||
|
||||
// Add option to see all or enter custom
|
||||
if (models.length > 11) {
|
||||
options.push({ value: '__show_all__', label: 'Show all models...' });
|
||||
}
|
||||
options.push({ value: '__custom__', label: 'Enter custom model...' });
|
||||
|
||||
const selection = await select({
|
||||
message: 'Select a model:',
|
||||
options
|
||||
});
|
||||
|
||||
if (isCancel(selection)) return selection;
|
||||
|
||||
if (selection === '__show_all__') {
|
||||
const allOptions = models.map((model) => ({
|
||||
value: model,
|
||||
label: model === recommended ? `${model} (Recommended)` : model
|
||||
}));
|
||||
|
||||
return await select({
|
||||
message: 'Select a model:',
|
||||
options: allOptions
|
||||
});
|
||||
}
|
||||
|
||||
if (selection === '__custom__') {
|
||||
return await text({
|
||||
message: 'Enter model name:',
|
||||
validate: (value) => {
|
||||
if (!value || value.trim().length === 0) {
|
||||
return 'Model name is required';
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return selection;
|
||||
}
|
||||
|
||||
async function setupOllama(): Promise<{
|
||||
provider: string;
|
||||
model: string;
|
||||
apiUrl: string;
|
||||
} | null> {
|
||||
console.log(chalk.cyan('\n Ollama - Free Local AI\n'));
|
||||
console.log(chalk.dim(' Setup steps:'));
|
||||
console.log(chalk.dim(' 1. Install: https://ollama.ai/download'));
|
||||
console.log(chalk.dim(' 2. Pull a model: ollama pull llama3:8b'));
|
||||
console.log(chalk.dim(' 3. Start server: ollama serve\n'));
|
||||
|
||||
// Try to fetch available models
|
||||
const loadingSpinner = spinner();
|
||||
loadingSpinner.start('Checking for local Ollama installation...');
|
||||
|
||||
const defaultUrl = 'http://localhost:11434';
|
||||
let ollamaModels: string[] = [];
|
||||
|
||||
try {
|
||||
ollamaModels = await fetchOllamaModels(defaultUrl);
|
||||
if (ollamaModels.length > 0) {
|
||||
loadingSpinner.stop(
|
||||
`${chalk.green('✔')} Found ${ollamaModels.length} local model(s)`
|
||||
);
|
||||
} else {
|
||||
loadingSpinner.stop(
|
||||
chalk.yellow(
|
||||
'Ollama is running but no models found. Pull a model first: ollama pull llama3:8b'
|
||||
)
|
||||
);
|
||||
}
|
||||
} catch {
|
||||
loadingSpinner.stop(
|
||||
chalk.yellow(
|
||||
'Could not connect to Ollama. Make sure it is running: ollama serve'
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
// Model selection
|
||||
let model: string | symbol;
|
||||
if (ollamaModels.length > 0) {
|
||||
model = await select({
|
||||
message: 'Select a model:',
|
||||
options: [
|
||||
...ollamaModels.map((m) => ({ value: m, label: m })),
|
||||
{ value: '__custom__', label: 'Enter custom model name...' }
|
||||
]
|
||||
});
|
||||
|
||||
if (isCancel(model)) return null;
|
||||
|
||||
if (model === '__custom__') {
|
||||
model = await text({
|
||||
message: 'Enter model name (e.g., llama3:8b, mistral):',
|
||||
placeholder: 'llama3:8b'
|
||||
});
|
||||
}
|
||||
} else {
|
||||
model = await text({
|
||||
message: 'Enter model name (e.g., llama3:8b, mistral):',
|
||||
placeholder: 'llama3:8b',
|
||||
validate: (value) => {
|
||||
if (!value || value.trim().length === 0) {
|
||||
return 'Model name is required';
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (isCancel(model)) return null;
|
||||
|
||||
// API URL (optional)
|
||||
const apiUrl = await text({
|
||||
message: 'Ollama URL (press Enter for default):',
|
||||
placeholder: defaultUrl,
|
||||
defaultValue: defaultUrl
|
||||
});
|
||||
|
||||
if (isCancel(apiUrl)) return null;
|
||||
|
||||
return {
|
||||
provider: OCO_AI_PROVIDER_ENUM.OLLAMA,
|
||||
model: model as string,
|
||||
apiUrl: (apiUrl as string) || defaultUrl
|
||||
};
|
||||
}
|
||||
|
||||
export async function runSetup(): Promise<boolean> {
|
||||
intro(chalk.bgCyan(' Welcome to OpenCommit! '));
|
||||
|
||||
// Select provider
|
||||
const provider = await selectProvider();
|
||||
if (isCancel(provider)) {
|
||||
outro('Setup cancelled');
|
||||
return false;
|
||||
}
|
||||
|
||||
let config: Partial<Record<string, any>> = {};
|
||||
|
||||
// Handle Ollama specially
|
||||
if (provider === OCO_AI_PROVIDER_ENUM.OLLAMA) {
|
||||
const ollamaConfig = await setupOllama();
|
||||
if (!ollamaConfig) {
|
||||
outro('Setup cancelled');
|
||||
return false;
|
||||
}
|
||||
|
||||
config = {
|
||||
OCO_AI_PROVIDER: ollamaConfig.provider,
|
||||
OCO_MODEL: ollamaConfig.model,
|
||||
OCO_API_URL: ollamaConfig.apiUrl,
|
||||
OCO_API_KEY: 'ollama' // Placeholder
|
||||
};
|
||||
} else if (provider === OCO_AI_PROVIDER_ENUM.MLX) {
|
||||
// MLX setup
|
||||
console.log(chalk.cyan('\n MLX - Apple Silicon Local AI\n'));
|
||||
console.log(chalk.dim(' MLX runs locally on Apple Silicon Macs.'));
|
||||
console.log(chalk.dim(' No API key required.\n'));
|
||||
|
||||
const model = await text({
|
||||
message: 'Enter model name:',
|
||||
placeholder: 'mlx-community/Llama-3-8B-Instruct-4bit'
|
||||
});
|
||||
|
||||
if (isCancel(model)) {
|
||||
outro('Setup cancelled');
|
||||
return false;
|
||||
}
|
||||
|
||||
config = {
|
||||
OCO_AI_PROVIDER: OCO_AI_PROVIDER_ENUM.MLX,
|
||||
OCO_MODEL: model,
|
||||
OCO_API_KEY: 'mlx' // Placeholder
|
||||
};
|
||||
} else {
|
||||
// Standard provider flow: API key then model
|
||||
const apiKey = await getApiKey(provider as string);
|
||||
if (isCancel(apiKey)) {
|
||||
outro('Setup cancelled');
|
||||
return false;
|
||||
}
|
||||
|
||||
const model = await selectModel(provider as string, apiKey as string);
|
||||
if (isCancel(model)) {
|
||||
outro('Setup cancelled');
|
||||
return false;
|
||||
}
|
||||
|
||||
config = {
|
||||
OCO_AI_PROVIDER: provider,
|
||||
OCO_API_KEY: apiKey,
|
||||
OCO_MODEL: model
|
||||
};
|
||||
}
|
||||
|
||||
// Save configuration
|
||||
const existingConfig = getIsGlobalConfigFileExist()
|
||||
? getGlobalConfig()
|
||||
: DEFAULT_CONFIG;
|
||||
|
||||
const newConfig = {
|
||||
...existingConfig,
|
||||
...config
|
||||
};
|
||||
|
||||
setGlobalConfig(newConfig as any);
|
||||
|
||||
outro(
|
||||
`${chalk.green(
|
||||
'✔'
|
||||
)} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan(
|
||||
'oco'
|
||||
)} to generate commit messages!`
|
||||
);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
export function isFirstRun(): boolean {
|
||||
const hasGlobalConfig = getIsGlobalConfigFileExist();
|
||||
const config = getConfig();
|
||||
|
||||
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
|
||||
|
||||
if (provider === OCO_AI_PROVIDER_ENUM.TEST) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const hasRequiredConfig = MODEL_REQUIRED_PROVIDERS.includes(
|
||||
provider as OCO_AI_PROVIDER_ENUM
|
||||
)
|
||||
? Boolean(config.OCO_MODEL)
|
||||
: Boolean(config.OCO_API_KEY);
|
||||
|
||||
// Trigger the full setup wizard only when nothing usable was configured yet.
|
||||
return !hasGlobalConfig && !hasRequiredConfig;
|
||||
}
|
||||
|
||||
export async function promptForMissingApiKey(): Promise<boolean> {
|
||||
const config = getConfig();
|
||||
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
|
||||
|
||||
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
|
||||
return true; // No API key needed
|
||||
}
|
||||
|
||||
if (config.OCO_API_KEY) {
|
||||
return true; // Already has key
|
||||
}
|
||||
|
||||
console.log(
|
||||
chalk.yellow(`\nAPI key missing for ${provider}. Let's set it up.\n`)
|
||||
);
|
||||
|
||||
const apiKey = await getApiKey(provider);
|
||||
if (isCancel(apiKey)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const existingConfig = getGlobalConfig();
|
||||
setGlobalConfig({
|
||||
...existingConfig,
|
||||
OCO_API_KEY: apiKey as string
|
||||
} as any);
|
||||
|
||||
console.log(chalk.green('✔') + ' API key saved\n');
|
||||
return true;
|
||||
}
|
||||
|
||||
export const setupCommand = command(
|
||||
{
|
||||
name: COMMANDS.setup,
|
||||
help: {
|
||||
description: 'Interactive setup wizard for OpenCommit'
|
||||
}
|
||||
},
|
||||
async () => {
|
||||
await runSetup();
|
||||
}
|
||||
);
|
||||
@@ -11,7 +11,9 @@ export interface AiEngineConfig {
|
||||
maxTokensOutput: number;
|
||||
maxTokensInput: number;
|
||||
baseURL?: string;
|
||||
proxy?: string | null;
|
||||
customHeaders?: Record<string, string>;
|
||||
ollamaThink?: boolean;
|
||||
}
|
||||
|
||||
type Client =
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import OpenAI from 'openai';
|
||||
import axios, { AxiosInstance } from 'axios';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
|
||||
interface AimlApiConfig extends AiEngineConfig {}
|
||||
@@ -32,16 +33,7 @@ export class AimlApiEngine implements AiEngine {
|
||||
const message = response.data.choices?.[0]?.message;
|
||||
return message?.content ?? null;
|
||||
} catch (error) {
|
||||
const err = error as Error;
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const apiError = error.response.data.error;
|
||||
if (apiError) throw new Error(apiError.message);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'aimlapi', this.config.model);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
import AnthropicClient from '@anthropic-ai/sdk';
|
||||
import { HttpsProxyAgent } from 'https-proxy-agent';
|
||||
import {
|
||||
MessageCreateParamsNonStreaming,
|
||||
MessageParam
|
||||
} from '@anthropic-ai/sdk/resources/messages.mjs';
|
||||
import { outro } from '@clack/prompts';
|
||||
import axios from 'axios';
|
||||
import chalk from 'chalk';
|
||||
import { OpenAI } from 'openai';
|
||||
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { tokenCount } from '../utils/tokenCount';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
@@ -20,7 +19,14 @@ export class AnthropicEngine implements AiEngine {
|
||||
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
this.client = new AnthropicClient({ apiKey: this.config.apiKey });
|
||||
const clientOptions: any = { apiKey: this.config.apiKey };
|
||||
|
||||
const proxy = config.proxy;
|
||||
if (proxy) {
|
||||
clientOptions.httpAgent = new HttpsProxyAgent(proxy);
|
||||
}
|
||||
|
||||
this.client = new AnthropicClient(clientOptions);
|
||||
}
|
||||
|
||||
public generateCommitMessage = async (
|
||||
@@ -37,9 +43,14 @@ export class AnthropicEngine implements AiEngine {
|
||||
system: systemMessage,
|
||||
messages: restMessages,
|
||||
temperature: 0,
|
||||
top_p: 0.1,
|
||||
max_tokens: this.config.maxTokensOutput
|
||||
};
|
||||
|
||||
// add top_p for non-4.5 models
|
||||
if (!/claude.*-4-5/.test(params.model)) {
|
||||
params.top_p = 0.1;
|
||||
}
|
||||
|
||||
try {
|
||||
const REQUEST_TOKENS = messages
|
||||
.map((msg) => tokenCount(msg.content as string) + 4)
|
||||
@@ -58,22 +69,7 @@ export class AnthropicEngine implements AiEngine {
|
||||
let content = message;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (error) {
|
||||
const err = error as Error;
|
||||
outro(`${chalk.red('✖')} ${err?.message || err}`);
|
||||
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const anthropicAiError = error.response.data.error;
|
||||
|
||||
if (anthropicAiError?.message) outro(anthropicAiError.message);
|
||||
outro(
|
||||
'For help look into README https://github.com/di-sukharev/opencommit#setup'
|
||||
);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'anthropic', this.config.model);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -2,11 +2,9 @@ import {
|
||||
AzureKeyCredential,
|
||||
OpenAIClient as AzureOpenAIClient
|
||||
} from '@azure/openai';
|
||||
import { outro } from '@clack/prompts';
|
||||
import axios from 'axios';
|
||||
import chalk from 'chalk';
|
||||
import { OpenAI } from 'openai';
|
||||
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { tokenCount } from '../utils/tokenCount';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
@@ -57,24 +55,7 @@ export class AzureEngine implements AiEngine {
|
||||
let content = message?.content;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (error) {
|
||||
outro(`${chalk.red('✖')} ${this.config.model}`);
|
||||
|
||||
const err = error as Error;
|
||||
outro(`${chalk.red('✖')} ${JSON.stringify(error)}`);
|
||||
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const openAiError = error.response.data.error;
|
||||
|
||||
if (openAiError?.message) outro(openAiError.message);
|
||||
outro(
|
||||
'For help look into README https://github.com/di-sukharev/opencommit#setup'
|
||||
);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'azure', this.config.model);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import axios from 'axios';
|
||||
import { OpenAI } from 'openai';
|
||||
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { tokenCount } from '../utils/tokenCount';
|
||||
import { OpenAiEngine, OpenAiConfig } from './openAi';
|
||||
@@ -10,9 +10,10 @@ export interface DeepseekConfig extends OpenAiConfig {}
|
||||
export class DeepseekEngine extends OpenAiEngine {
|
||||
constructor(config: DeepseekConfig) {
|
||||
// Call OpenAIEngine constructor with forced Deepseek baseURL
|
||||
// Put baseURL first so user config can override it
|
||||
super({
|
||||
...config,
|
||||
baseURL: 'https://api.deepseek.com/v1'
|
||||
baseURL: 'https://api.deepseek.com/v1',
|
||||
...config
|
||||
});
|
||||
}
|
||||
|
||||
@@ -45,17 +46,7 @@ export class DeepseekEngine extends OpenAiEngine {
|
||||
let content = message?.content;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (error) {
|
||||
const err = error as Error;
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const openAiError = error.response.data.error;
|
||||
|
||||
if (openAiError) throw new Error(openAiError.message);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'deepseek', this.config.model);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import axios, { AxiosInstance } from 'axios';
|
||||
import { OpenAI } from 'openai';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
|
||||
@@ -39,9 +40,8 @@ export class FlowiseEngine implements AiEngine {
|
||||
const message = response.data;
|
||||
let content = message?.text;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (err: any) {
|
||||
const message = err.response?.data?.error ?? err.message;
|
||||
throw new Error('local model issues. details: ' + message);
|
||||
} catch (error) {
|
||||
throw normalizeEngineError(error, 'flowise', this.config.model);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,72 @@
|
||||
import {
|
||||
Content,
|
||||
FinishReason,
|
||||
GenerateContentResponse,
|
||||
GoogleGenerativeAI,
|
||||
HarmBlockThreshold,
|
||||
HarmCategory,
|
||||
Part
|
||||
} from '@google/generative-ai';
|
||||
import axios from 'axios';
|
||||
import { OpenAI } from 'openai';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
|
||||
interface GeminiConfig extends AiEngineConfig {}
|
||||
|
||||
const GEMINI_BLOCKING_FINISH_REASONS = new Set<FinishReason>([
|
||||
FinishReason.RECITATION,
|
||||
FinishReason.SAFETY,
|
||||
FinishReason.LANGUAGE
|
||||
]);
|
||||
|
||||
const formatGeminiBlockMessage = (
|
||||
response: GenerateContentResponse
|
||||
): string => {
|
||||
const promptFeedback = response.promptFeedback;
|
||||
if (promptFeedback?.blockReason) {
|
||||
return promptFeedback.blockReasonMessage
|
||||
? `Gemini response was blocked due to ${promptFeedback.blockReason}: ${promptFeedback.blockReasonMessage}`
|
||||
: `Gemini response was blocked due to ${promptFeedback.blockReason}`;
|
||||
}
|
||||
|
||||
const firstCandidate = response.candidates?.[0];
|
||||
if (firstCandidate?.finishReason) {
|
||||
return firstCandidate.finishMessage
|
||||
? `Gemini response was blocked due to ${firstCandidate.finishReason}: ${firstCandidate.finishMessage}`
|
||||
: `Gemini response was blocked due to ${firstCandidate.finishReason}`;
|
||||
}
|
||||
|
||||
return 'Gemini response did not contain usable text';
|
||||
};
|
||||
|
||||
const extractGeminiText = (response: GenerateContentResponse): string => {
|
||||
const firstCandidate = response.candidates?.[0];
|
||||
|
||||
if (
|
||||
firstCandidate?.finishReason &&
|
||||
GEMINI_BLOCKING_FINISH_REASONS.has(firstCandidate.finishReason)
|
||||
) {
|
||||
throw new Error(formatGeminiBlockMessage(response));
|
||||
}
|
||||
|
||||
const text = firstCandidate?.content?.parts
|
||||
?.flatMap((part) =>
|
||||
'text' in part && typeof part.text === 'string' ? [part.text] : []
|
||||
)
|
||||
.join('');
|
||||
|
||||
if (typeof text === 'string' && text.length > 0) {
|
||||
return text;
|
||||
}
|
||||
|
||||
if (response.promptFeedback?.blockReason) {
|
||||
throw new Error(formatGeminiBlockMessage(response));
|
||||
}
|
||||
|
||||
return '';
|
||||
};
|
||||
|
||||
export class GeminiEngine implements AiEngine {
|
||||
config: GeminiConfig;
|
||||
client: GoogleGenerativeAI;
|
||||
@@ -29,10 +84,15 @@ export class GeminiEngine implements AiEngine {
|
||||
.map((m) => m.content)
|
||||
.join('\n');
|
||||
|
||||
const gemini = this.client.getGenerativeModel({
|
||||
const gemini = this.client.getGenerativeModel(
|
||||
{
|
||||
model: this.config.model,
|
||||
systemInstruction
|
||||
});
|
||||
},
|
||||
{
|
||||
baseUrl: this.config.baseURL
|
||||
}
|
||||
);
|
||||
|
||||
const contents = messages
|
||||
.filter((m) => m.role !== 'system')
|
||||
@@ -72,19 +132,10 @@ export class GeminiEngine implements AiEngine {
|
||||
}
|
||||
});
|
||||
|
||||
const content = result.response.text();
|
||||
const content = extractGeminiText(result.response);
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (error) {
|
||||
const err = error as Error;
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const geminiError = error.response.data.error;
|
||||
if (geminiError) throw new Error(geminiError?.message);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'gemini', this.config.model);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import axios from 'axios';
|
||||
import { Mistral } from '@mistralai/mistralai';
|
||||
import { OpenAI } from 'openai';
|
||||
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { tokenCount } from '../utils/tokenCount';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
@@ -9,10 +10,6 @@ import { AiEngine, AiEngineConfig } from './Engine';
|
||||
export interface MistralAiConfig extends AiEngineConfig {}
|
||||
export type MistralCompletionMessageParam = Array<any>;
|
||||
|
||||
// Import Mistral dynamically to avoid TS errors
|
||||
// eslint-disable-next-line @typescript-eslint/no-var-requires
|
||||
const Mistral = require('@mistralai/mistralai').Mistral;
|
||||
|
||||
export class MistralAiEngine implements AiEngine {
|
||||
config: MistralAiConfig;
|
||||
client: any; // Using any type for Mistral client to avoid TS errors
|
||||
@@ -63,17 +60,7 @@ export class MistralAiEngine implements AiEngine {
|
||||
let content = message.content as string;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (error) {
|
||||
const err = error as Error;
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const mistralError = error.response.data.error;
|
||||
|
||||
if (mistralError) throw new Error(mistralError.message);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'mistral', this.config.model);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,20 +1,26 @@
|
||||
import axios, { AxiosInstance } from 'axios';
|
||||
import { OpenAI } from 'openai';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
|
||||
interface MLXConfig extends AiEngineConfig {}
|
||||
|
||||
const DEFAULT_MLX_URL = 'http://localhost:8080';
|
||||
const MLX_CHAT_PATH = '/v1/chat/completions';
|
||||
|
||||
export class MLXEngine implements AiEngine {
|
||||
config: MLXConfig;
|
||||
client: AxiosInstance;
|
||||
private chatUrl: string;
|
||||
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
|
||||
const baseUrl = config.baseURL || DEFAULT_MLX_URL;
|
||||
this.chatUrl = `${baseUrl}${MLX_CHAT_PATH}`;
|
||||
|
||||
this.client = axios.create({
|
||||
url: config.baseURL
|
||||
? `${config.baseURL}/${config.apiKey}`
|
||||
: 'http://localhost:8080/v1/chat/completions',
|
||||
headers: { 'Content-Type': 'application/json' }
|
||||
});
|
||||
}
|
||||
@@ -30,18 +36,14 @@ export class MLXEngine implements AiEngine {
|
||||
stream: false
|
||||
};
|
||||
try {
|
||||
const response = await this.client.post(
|
||||
this.client.getUri(this.config),
|
||||
params
|
||||
);
|
||||
const response = await this.client.post(this.chatUrl, params);
|
||||
|
||||
const choices = response.data.choices;
|
||||
const message = choices[0].message;
|
||||
let content = message?.content;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (err: any) {
|
||||
const message = err.response?.data?.error ?? err.message;
|
||||
throw new Error(`MLX provider error: ${message}`);
|
||||
} catch (error) {
|
||||
throw normalizeEngineError(error, 'mlx', this.config.model);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,52 +1,56 @@
|
||||
import axios, { AxiosInstance } from 'axios';
|
||||
import { OpenAI } from 'openai';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
|
||||
interface OllamaConfig extends AiEngineConfig {}
|
||||
interface OllamaConfig extends AiEngineConfig {
|
||||
ollamaThink?: boolean;
|
||||
}
|
||||
|
||||
const DEFAULT_OLLAMA_URL = 'http://localhost:11434';
|
||||
const OLLAMA_CHAT_PATH = '/api/chat';
|
||||
|
||||
export class OllamaEngine implements AiEngine {
|
||||
config: OllamaConfig;
|
||||
client: AxiosInstance;
|
||||
private chatUrl: string;
|
||||
|
||||
constructor(config) {
|
||||
this.config = config;
|
||||
|
||||
const baseUrl = config.baseURL || DEFAULT_OLLAMA_URL;
|
||||
this.chatUrl = `${baseUrl}${OLLAMA_CHAT_PATH}`;
|
||||
|
||||
// Combine base headers with custom headers
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
...config.customHeaders
|
||||
};
|
||||
|
||||
this.client = axios.create({
|
||||
url: config.baseURL
|
||||
? `${config.baseURL}/${config.apiKey}`
|
||||
: 'http://localhost:11434/api/chat',
|
||||
headers
|
||||
});
|
||||
this.client = axios.create({ headers });
|
||||
}
|
||||
|
||||
async generateCommitMessage(
|
||||
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
|
||||
): Promise<string | undefined> {
|
||||
const params = {
|
||||
const params: Record<string, any> = {
|
||||
model: this.config.model ?? 'mistral',
|
||||
messages,
|
||||
options: { temperature: 0, top_p: 0.1 },
|
||||
stream: false
|
||||
};
|
||||
if (typeof this.config.ollamaThink === 'boolean') {
|
||||
params.think = this.config.ollamaThink;
|
||||
}
|
||||
try {
|
||||
const response = await this.client.post(
|
||||
this.client.getUri(this.config),
|
||||
params
|
||||
);
|
||||
const response = await this.client.post(this.chatUrl, params);
|
||||
|
||||
const { message } = response.data;
|
||||
let content = message?.content;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (err: any) {
|
||||
const message = err.response?.data?.error ?? err.message;
|
||||
throw new Error(`Ollama provider error: ${message}`);
|
||||
} catch (error) {
|
||||
throw normalizeEngineError(error, 'ollama', this.config.model);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import axios from 'axios';
|
||||
import { OpenAI } from 'openai';
|
||||
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
|
||||
import { parseCustomHeaders } from '../utils/engine';
|
||||
import { HttpsProxyAgent } from 'https-proxy-agent';
|
||||
import { parseCustomHeaders } from '../utils/customHeaders';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { tokenCount } from '../utils/tokenCount';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
@@ -23,6 +24,11 @@ export class OpenAiEngine implements AiEngine {
|
||||
clientOptions.baseURL = config.baseURL;
|
||||
}
|
||||
|
||||
const proxy = config.proxy;
|
||||
if (proxy) {
|
||||
clientOptions.httpAgent = new HttpsProxyAgent(proxy);
|
||||
}
|
||||
|
||||
if (config.customHeaders) {
|
||||
const headers = parseCustomHeaders(config.customHeaders);
|
||||
if (Object.keys(headers).length > 0) {
|
||||
@@ -36,12 +42,18 @@ export class OpenAiEngine implements AiEngine {
|
||||
public generateCommitMessage = async (
|
||||
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
|
||||
): Promise<string | null> => {
|
||||
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
|
||||
|
||||
const params = {
|
||||
model: this.config.model,
|
||||
messages,
|
||||
...(isReasoningModel
|
||||
? { max_completion_tokens: this.config.maxTokensOutput }
|
||||
: {
|
||||
temperature: 0,
|
||||
top_p: 0.1,
|
||||
max_tokens: this.config.maxTokensOutput
|
||||
})
|
||||
};
|
||||
|
||||
try {
|
||||
@@ -55,23 +67,15 @@ export class OpenAiEngine implements AiEngine {
|
||||
)
|
||||
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
|
||||
|
||||
const completion = await this.client.chat.completions.create(params);
|
||||
const completion = await this.client.chat.completions.create(
|
||||
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
|
||||
);
|
||||
|
||||
const message = completion.choices[0].message;
|
||||
let content = message?.content;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (error) {
|
||||
const err = error as Error;
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const openAiError = error.response.data.error;
|
||||
|
||||
if (openAiError) throw new Error(openAiError.message);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'openai', this.config.model);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import OpenAI from 'openai';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
import axios, { AxiosInstance } from 'axios';
|
||||
import { normalizeEngineError } from '../utils/engineErrorHandler';
|
||||
import { removeContentTags } from '../utils/removeContentTags';
|
||||
import { AiEngine, AiEngineConfig } from './Engine';
|
||||
|
||||
interface OpenRouterConfig extends AiEngineConfig {}
|
||||
|
||||
@@ -33,17 +34,7 @@ export class OpenRouterEngine implements AiEngine {
|
||||
let content = message?.content;
|
||||
return removeContentTags(content, 'think');
|
||||
} catch (error) {
|
||||
const err = error as Error;
|
||||
if (
|
||||
axios.isAxiosError<{ error?: { message: string } }>(error) &&
|
||||
error.response?.status === 401
|
||||
) {
|
||||
const openRouterError = error.response.data.error;
|
||||
|
||||
if (openRouterError) throw new Error(openRouterError.message);
|
||||
}
|
||||
|
||||
throw err;
|
||||
throw normalizeEngineError(error, 'openrouter', this.config.model);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,7 +1,22 @@
|
||||
import { select, confirm, isCancel } from '@clack/prompts';
|
||||
import chalk from 'chalk';
|
||||
import { OpenAI } from 'openai';
|
||||
import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config';
|
||||
import {
|
||||
DEFAULT_TOKEN_LIMITS,
|
||||
getConfig,
|
||||
setGlobalConfig,
|
||||
getGlobalConfig,
|
||||
MODEL_LIST,
|
||||
RECOMMENDED_MODELS
|
||||
} from './commands/config';
|
||||
import { getMainCommitPrompt } from './prompts';
|
||||
import { getEngine } from './utils/engine';
|
||||
import {
|
||||
isModelNotFoundError,
|
||||
getSuggestedModels,
|
||||
ModelNotFoundError
|
||||
} from './utils/errors';
|
||||
import { GenerateCommitMessageErrorEnum } from './utils/generateCommitMessageErrors';
|
||||
import { mergeDiffs } from './utils/mergeDiffs';
|
||||
import { tokenCount } from './utils/tokenCount';
|
||||
|
||||
@@ -29,11 +44,90 @@ const generateCommitMessageChatCompletionPrompt = async (
|
||||
return chatContextAsCompletionRequest;
|
||||
};
|
||||
|
||||
export enum GenerateCommitMessageErrorEnum {
|
||||
tooMuchTokens = 'TOO_MUCH_TOKENS',
|
||||
internalError = 'INTERNAL_ERROR',
|
||||
emptyMessage = 'EMPTY_MESSAGE',
|
||||
outputTokensTooHigh = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT} tokens.`
|
||||
async function handleModelNotFoundError(
|
||||
error: Error,
|
||||
provider: string,
|
||||
currentModel: string
|
||||
): Promise<string | null> {
|
||||
console.log(chalk.red(`\n✖ Model '${currentModel}' not found\n`));
|
||||
|
||||
const suggestedModels = getSuggestedModels(provider, currentModel);
|
||||
const recommended =
|
||||
RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
|
||||
|
||||
if (suggestedModels.length === 0) {
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
`No alternative models available. Run 'oco setup' to configure a different model.`
|
||||
)
|
||||
);
|
||||
return null;
|
||||
}
|
||||
|
||||
const options: Array<{ value: string; label: string }> = [];
|
||||
|
||||
// Add recommended first if available
|
||||
if (recommended && suggestedModels.includes(recommended)) {
|
||||
options.push({
|
||||
value: recommended,
|
||||
label: `${recommended} (Recommended)`
|
||||
});
|
||||
}
|
||||
|
||||
// Add other suggestions
|
||||
suggestedModels
|
||||
.filter((m) => m !== recommended)
|
||||
.forEach((model) => {
|
||||
options.push({ value: model, label: model });
|
||||
});
|
||||
|
||||
options.push({ value: '__custom__', label: 'Enter custom model...' });
|
||||
|
||||
const selection = await select({
|
||||
message: 'Select an alternative model:',
|
||||
options
|
||||
});
|
||||
|
||||
if (isCancel(selection)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
let newModel: string;
|
||||
if (selection === '__custom__') {
|
||||
const { text } = await import('@clack/prompts');
|
||||
const customModel = await text({
|
||||
message: 'Enter model name:',
|
||||
validate: (value) => {
|
||||
if (!value || value.trim().length === 0) {
|
||||
return 'Model name is required';
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
});
|
||||
|
||||
if (isCancel(customModel)) {
|
||||
return null;
|
||||
}
|
||||
newModel = customModel as string;
|
||||
} else {
|
||||
newModel = selection as string;
|
||||
}
|
||||
|
||||
// Ask if user wants to save as default
|
||||
const saveAsDefault = await confirm({
|
||||
message: 'Save as default model?'
|
||||
});
|
||||
|
||||
if (!isCancel(saveAsDefault) && saveAsDefault) {
|
||||
const existingConfig = getGlobalConfig();
|
||||
setGlobalConfig({
|
||||
...existingConfig,
|
||||
OCO_MODEL: newModel
|
||||
} as any);
|
||||
console.log(chalk.green('√') + ' Model saved as default\n');
|
||||
}
|
||||
|
||||
return newModel;
|
||||
}
|
||||
|
||||
const ADJUSTMENT_FACTOR = 20;
|
||||
@@ -41,8 +135,13 @@ const ADJUSTMENT_FACTOR = 20;
|
||||
export const generateCommitMessageByDiff = async (
|
||||
diff: string,
|
||||
fullGitMojiSpec: boolean = false,
|
||||
context: string = ''
|
||||
context: string = '',
|
||||
retryWithModel?: string
|
||||
): Promise<string> => {
|
||||
const currentConfig = getConfig();
|
||||
const provider = currentConfig.OCO_AI_PROVIDER || 'openai';
|
||||
const currentModel = retryWithModel || currentConfig.OCO_MODEL;
|
||||
|
||||
try {
|
||||
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(
|
||||
fullGitMojiSpec,
|
||||
@@ -63,14 +162,11 @@ export const generateCommitMessageByDiff = async (
|
||||
const commitMessagePromises = await getCommitMsgsPromisesFromFileDiffs(
|
||||
diff,
|
||||
MAX_REQUEST_TOKENS,
|
||||
fullGitMojiSpec
|
||||
fullGitMojiSpec,
|
||||
context
|
||||
);
|
||||
|
||||
const commitMessages = [] as string[];
|
||||
for (const promise of commitMessagePromises) {
|
||||
commitMessages.push((await promise) as string);
|
||||
await delay(2000);
|
||||
}
|
||||
const commitMessages = await Promise.all(commitMessagePromises);
|
||||
|
||||
return commitMessages.join('\n\n');
|
||||
}
|
||||
@@ -89,6 +185,32 @@ export const generateCommitMessageByDiff = async (
|
||||
|
||||
return commitMessage;
|
||||
} catch (error) {
|
||||
// Handle model-not-found errors with interactive recovery
|
||||
if (isModelNotFoundError(error)) {
|
||||
const newModel = await handleModelNotFoundError(
|
||||
error as Error,
|
||||
provider,
|
||||
currentModel
|
||||
);
|
||||
|
||||
if (newModel) {
|
||||
console.log(chalk.cyan(`Retrying with ${newModel}...\n`));
|
||||
// Retry with the new model by updating config temporarily
|
||||
const existingConfig = getGlobalConfig();
|
||||
setGlobalConfig({
|
||||
...existingConfig,
|
||||
OCO_MODEL: newModel
|
||||
} as any);
|
||||
|
||||
return generateCommitMessageByDiff(
|
||||
diff,
|
||||
fullGitMojiSpec,
|
||||
context,
|
||||
newModel
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
};
|
||||
@@ -97,7 +219,8 @@ function getMessagesPromisesByChangesInFile(
|
||||
fileDiff: string,
|
||||
separator: string,
|
||||
maxChangeLength: number,
|
||||
fullGitMojiSpec: boolean
|
||||
fullGitMojiSpec: boolean,
|
||||
context: string
|
||||
) {
|
||||
const hunkHeaderSeparator = '@@ ';
|
||||
const [fileHeader, ...fileDiffByLines] = fileDiff.split(hunkHeaderSeparator);
|
||||
@@ -125,7 +248,8 @@ function getMessagesPromisesByChangesInFile(
|
||||
async (lineDiff) => {
|
||||
const messages = await generateCommitMessageChatCompletionPrompt(
|
||||
separator + lineDiff,
|
||||
fullGitMojiSpec
|
||||
fullGitMojiSpec,
|
||||
context
|
||||
);
|
||||
|
||||
return engine.generateCommitMessage(messages);
|
||||
@@ -174,7 +298,8 @@ function splitDiff(diff: string, maxChangeLength: number) {
|
||||
export const getCommitMsgsPromisesFromFileDiffs = async (
|
||||
diff: string,
|
||||
maxDiffLength: number,
|
||||
fullGitMojiSpec: boolean
|
||||
fullGitMojiSpec: boolean,
|
||||
context: string
|
||||
) => {
|
||||
const separator = 'diff --git ';
|
||||
|
||||
@@ -192,14 +317,16 @@ export const getCommitMsgsPromisesFromFileDiffs = async (
|
||||
fileDiff,
|
||||
separator,
|
||||
maxDiffLength,
|
||||
fullGitMojiSpec
|
||||
fullGitMojiSpec,
|
||||
context
|
||||
);
|
||||
|
||||
commitMessagePromises.push(...messagesPromises);
|
||||
} else {
|
||||
const messages = await generateCommitMessageChatCompletionPrompt(
|
||||
separator + fileDiff,
|
||||
fullGitMojiSpec
|
||||
fullGitMojiSpec,
|
||||
context
|
||||
);
|
||||
|
||||
const engine = getEngine();
|
||||
|
||||
@@ -95,10 +95,10 @@ const CONVENTIONAL_COMMIT_KEYWORDS =
|
||||
'Do not preface the commit with anything, except for the conventional commit keywords: fix, feat, build, chore, ci, docs, style, refactor, perf, test.';
|
||||
|
||||
const getCommitConvention = (fullGitMojiSpec: boolean) =>
|
||||
config.OCO_EMOJI
|
||||
? fullGitMojiSpec
|
||||
fullGitMojiSpec
|
||||
? FULL_GITMOJI_SPEC
|
||||
: GITMOJI_HELP
|
||||
: config.OCO_EMOJI
|
||||
? GITMOJI_HELP
|
||||
: CONVENTIONAL_COMMIT_KEYWORDS;
|
||||
|
||||
const getDescriptionInstruction = () =>
|
||||
@@ -123,20 +123,19 @@ const getScopeInstruction = () =>
|
||||
* $ oco -- This is a context used to generate the commit message
|
||||
* @returns - The context of the user input
|
||||
*/
|
||||
const userInputCodeContext = (context: string) => {
|
||||
if (context !== '' && context !== ' ') {
|
||||
return `Additional context provided by the user: <context>${context}</context>\nConsider this context when generating the commit message, incorporating relevant information when appropriate.`;
|
||||
}
|
||||
const userInputCodeContext = (context: string | undefined | null) => {
|
||||
const trimmed = (context ?? '').trim();
|
||||
if (trimmed === '') {
|
||||
return '';
|
||||
}
|
||||
return `Additional context provided by the user: <context>${trimmed}</context>\nConsider this context when generating the commit message, incorporating relevant information when appropriate.`;
|
||||
};
|
||||
|
||||
const INIT_MAIN_PROMPT = (
|
||||
language: string,
|
||||
fullGitMojiSpec: boolean,
|
||||
context: string
|
||||
): OpenAI.Chat.Completions.ChatCompletionMessageParam => ({
|
||||
role: 'system',
|
||||
content: (() => {
|
||||
): OpenAI.Chat.Completions.ChatCompletionMessageParam => {
|
||||
const commitConvention = fullGitMojiSpec
|
||||
? 'GitMoji specification'
|
||||
: 'Conventional Commit Convention';
|
||||
@@ -150,9 +149,10 @@ const INIT_MAIN_PROMPT = (
|
||||
const generalGuidelines = `Use the present tense. Lines must not be longer than 74 characters. Use ${language} for the commit message.`;
|
||||
const userInputContext = userInputCodeContext(context);
|
||||
|
||||
return `${missionStatement}\n${diffInstruction}\n${conventionGuidelines}\n${descriptionGuideline}\n${oneLineCommitGuideline}\n${scopeInstruction}\n${generalGuidelines}\n${userInputContext}`;
|
||||
})()
|
||||
});
|
||||
const content = `${missionStatement}\n${diffInstruction}\n${conventionGuidelines}\n${descriptionGuideline}\n${oneLineCommitGuideline}\n${scopeInstruction}\n${generalGuidelines}\n${userInputContext}`;
|
||||
|
||||
return { role: 'system', content };
|
||||
};
|
||||
|
||||
export const INIT_DIFF_PROMPT: OpenAI.Chat.Completions.ChatCompletionMessageParam =
|
||||
{
|
||||
|
||||
@@ -6,6 +6,10 @@ import currentPackage from '../../package.json';
|
||||
import { getOpenCommitLatestVersion } from '../version';
|
||||
|
||||
export const checkIsLatestVersion = async () => {
|
||||
if (process.env.OCO_TEST_SKIP_VERSION_CHECK === 'true') {
|
||||
return;
|
||||
}
|
||||
|
||||
const latestVersion = await getOpenCommitLatestVersion();
|
||||
|
||||
if (latestVersion) {
|
||||
|
||||
21
src/utils/customHeaders.ts
Normal file
21
src/utils/customHeaders.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
export function parseCustomHeaders(headers: any): Record<string, string> {
|
||||
let parsedHeaders = {};
|
||||
|
||||
if (!headers) {
|
||||
return parsedHeaders;
|
||||
}
|
||||
|
||||
try {
|
||||
if (typeof headers === 'object' && !Array.isArray(headers)) {
|
||||
parsedHeaders = headers;
|
||||
} else {
|
||||
parsedHeaders = JSON.parse(headers);
|
||||
}
|
||||
} catch {
|
||||
console.warn(
|
||||
'Invalid OCO_API_CUSTOM_HEADERS format, ignoring custom headers'
|
||||
);
|
||||
}
|
||||
|
||||
return parsedHeaders;
|
||||
}
|
||||
@@ -13,47 +13,32 @@ import { MLXEngine } from '../engine/mlx';
|
||||
import { DeepseekEngine } from '../engine/deepseek';
|
||||
import { AimlApiEngine } from '../engine/aimlapi';
|
||||
import { OpenRouterEngine } from '../engine/openrouter';
|
||||
|
||||
export function parseCustomHeaders(headers: any): Record<string, string> {
|
||||
let parsedHeaders = {};
|
||||
|
||||
if (!headers) {
|
||||
return parsedHeaders;
|
||||
}
|
||||
|
||||
try {
|
||||
if (typeof headers === 'object' && !Array.isArray(headers)) {
|
||||
parsedHeaders = headers;
|
||||
} else {
|
||||
parsedHeaders = JSON.parse(headers);
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn(
|
||||
'Invalid OCO_API_CUSTOM_HEADERS format, ignoring custom headers'
|
||||
);
|
||||
}
|
||||
|
||||
return parsedHeaders;
|
||||
}
|
||||
import { parseCustomHeaders } from './customHeaders';
|
||||
import { resolveProxy } from './proxy';
|
||||
|
||||
export function getEngine(): AiEngine {
|
||||
const config = getConfig();
|
||||
const provider = config.OCO_AI_PROVIDER;
|
||||
|
||||
const customHeaders = parseCustomHeaders(config.OCO_API_CUSTOM_HEADERS);
|
||||
const resolvedProxy = resolveProxy(config.OCO_PROXY);
|
||||
|
||||
const DEFAULT_CONFIG = {
|
||||
model: config.OCO_MODEL!,
|
||||
maxTokensOutput: config.OCO_TOKENS_MAX_OUTPUT!,
|
||||
maxTokensInput: config.OCO_TOKENS_MAX_INPUT!,
|
||||
baseURL: config.OCO_API_URL!,
|
||||
proxy: resolvedProxy,
|
||||
apiKey: config.OCO_API_KEY!,
|
||||
customHeaders
|
||||
};
|
||||
|
||||
switch (provider) {
|
||||
case OCO_AI_PROVIDER_ENUM.OLLAMA:
|
||||
return new OllamaEngine(DEFAULT_CONFIG);
|
||||
return new OllamaEngine({
|
||||
...DEFAULT_CONFIG,
|
||||
ollamaThink: config.OCO_OLLAMA_THINK
|
||||
});
|
||||
|
||||
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
|
||||
return new AnthropicEngine(DEFAULT_CONFIG);
|
||||
|
||||
205
src/utils/engineErrorHandler.ts
Normal file
205
src/utils/engineErrorHandler.ts
Normal file
@@ -0,0 +1,205 @@
|
||||
import axios from 'axios';
|
||||
import {
|
||||
AuthenticationError,
|
||||
InsufficientCreditsError,
|
||||
ModelNotFoundError,
|
||||
RateLimitError,
|
||||
ServiceUnavailableError
|
||||
} from './errors';
|
||||
|
||||
/**
|
||||
* Extracts HTTP status code from various error types
|
||||
*/
|
||||
function getStatusCode(error: unknown): number | null {
|
||||
// Direct status property (common in API SDKs)
|
||||
if (typeof (error as any)?.status === 'number') {
|
||||
return (error as any).status;
|
||||
}
|
||||
|
||||
// Axios-style errors
|
||||
if (axios.isAxiosError(error)) {
|
||||
return error.response?.status ?? null;
|
||||
}
|
||||
|
||||
// Response object with status
|
||||
if (typeof (error as any)?.response?.status === 'number') {
|
||||
return (error as any).response.status;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts retry-after value from error headers (for rate limiting)
|
||||
*/
|
||||
function getRetryAfter(error: unknown): number | undefined {
|
||||
const headers = (error as any)?.response?.headers;
|
||||
if (headers) {
|
||||
const retryAfter = headers['retry-after'] || headers['Retry-After'];
|
||||
if (retryAfter) {
|
||||
const seconds = parseInt(retryAfter, 10);
|
||||
if (!isNaN(seconds)) {
|
||||
return seconds;
|
||||
}
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the error message from various error structures
|
||||
*/
|
||||
function extractErrorMessage(error: unknown): string {
|
||||
if (error instanceof Error) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
// API error response structures
|
||||
const apiError = (error as any)?.response?.data?.error;
|
||||
if (apiError) {
|
||||
if (typeof apiError === 'string') {
|
||||
return apiError;
|
||||
}
|
||||
if (apiError.message) {
|
||||
return apiError.message;
|
||||
}
|
||||
}
|
||||
|
||||
// Direct error data
|
||||
const errorData = (error as any)?.error;
|
||||
if (errorData) {
|
||||
if (typeof errorData === 'string') {
|
||||
return errorData;
|
||||
}
|
||||
if (errorData.message) {
|
||||
return errorData.message;
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback
|
||||
if (typeof error === 'string') {
|
||||
return error;
|
||||
}
|
||||
|
||||
return 'An unknown error occurred';
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the error message indicates a model not found error
|
||||
*/
|
||||
function isModelNotFoundMessage(message: string): boolean {
|
||||
const lowerMessage = message.toLowerCase();
|
||||
return (
|
||||
(lowerMessage.includes('model') &&
|
||||
(lowerMessage.includes('not found') ||
|
||||
lowerMessage.includes('does not exist') ||
|
||||
lowerMessage.includes('invalid') ||
|
||||
lowerMessage.includes('pull'))) ||
|
||||
lowerMessage.includes('does_not_exist')
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the error message indicates insufficient credits
|
||||
*/
|
||||
function isInsufficientCreditsMessage(message: string): boolean {
|
||||
const lowerMessage = message.toLowerCase();
|
||||
return (
|
||||
lowerMessage.includes('insufficient') ||
|
||||
lowerMessage.includes('credit') ||
|
||||
lowerMessage.includes('quota') ||
|
||||
lowerMessage.includes('balance too low') ||
|
||||
lowerMessage.includes('billing') ||
|
||||
lowerMessage.includes('payment required') ||
|
||||
lowerMessage.includes('exceeded')
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalizes raw API errors into typed error classes.
|
||||
* This provides consistent error handling across all engine implementations.
|
||||
*
|
||||
* @param error - The raw error from the API call
|
||||
* @param provider - The AI provider name (e.g., 'openai', 'anthropic')
|
||||
* @param model - The model being used
|
||||
* @returns A typed Error instance
|
||||
*/
|
||||
export function normalizeEngineError(
|
||||
error: unknown,
|
||||
provider: string,
|
||||
model: string
|
||||
): Error {
|
||||
// If it's already one of our custom errors, return as-is
|
||||
if (
|
||||
error instanceof ModelNotFoundError ||
|
||||
error instanceof AuthenticationError ||
|
||||
error instanceof InsufficientCreditsError ||
|
||||
error instanceof RateLimitError ||
|
||||
error instanceof ServiceUnavailableError
|
||||
) {
|
||||
return error;
|
||||
}
|
||||
|
||||
const statusCode = getStatusCode(error);
|
||||
const message = extractErrorMessage(error);
|
||||
|
||||
// Handle based on HTTP status codes
|
||||
switch (statusCode) {
|
||||
case 401:
|
||||
return new AuthenticationError(provider, message);
|
||||
|
||||
case 402:
|
||||
return new InsufficientCreditsError(provider, message);
|
||||
|
||||
case 404:
|
||||
// Could be model not found or endpoint not found
|
||||
if (isModelNotFoundMessage(message)) {
|
||||
return new ModelNotFoundError(model, provider, 404);
|
||||
}
|
||||
// Return generic error for other 404s
|
||||
return error instanceof Error ? error : new Error(message);
|
||||
|
||||
case 429:
|
||||
const retryAfter = getRetryAfter(error);
|
||||
return new RateLimitError(provider, retryAfter, message);
|
||||
|
||||
case 500:
|
||||
case 502:
|
||||
case 503:
|
||||
case 504:
|
||||
return new ServiceUnavailableError(provider, statusCode, message);
|
||||
}
|
||||
|
||||
// Handle based on error message content
|
||||
if (isModelNotFoundMessage(message)) {
|
||||
return new ModelNotFoundError(model, provider, 404);
|
||||
}
|
||||
|
||||
if (isInsufficientCreditsMessage(message)) {
|
||||
return new InsufficientCreditsError(provider, message);
|
||||
}
|
||||
|
||||
// Check for rate limit patterns in message
|
||||
const lowerMessage = message.toLowerCase();
|
||||
if (
|
||||
lowerMessage.includes('rate limit') ||
|
||||
lowerMessage.includes('rate_limit') ||
|
||||
lowerMessage.includes('too many requests')
|
||||
) {
|
||||
return new RateLimitError(provider, undefined, message);
|
||||
}
|
||||
|
||||
// Check for auth patterns in message
|
||||
if (
|
||||
lowerMessage.includes('unauthorized') ||
|
||||
lowerMessage.includes('api key') ||
|
||||
lowerMessage.includes('apikey') ||
|
||||
lowerMessage.includes('authentication') ||
|
||||
lowerMessage.includes('invalid_api_key')
|
||||
) {
|
||||
return new AuthenticationError(provider, message);
|
||||
}
|
||||
|
||||
// Return original error or wrap in Error if needed
|
||||
return error instanceof Error ? error : new Error(message);
|
||||
}
|
||||
515
src/utils/errors.ts
Normal file
515
src/utils/errors.ts
Normal file
@@ -0,0 +1,515 @@
|
||||
import chalk from 'chalk';
|
||||
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
|
||||
|
||||
// Provider billing/help URLs for common errors
|
||||
export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
|
||||
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]:
|
||||
'https://console.anthropic.com/settings/billing',
|
||||
[OCO_AI_PROVIDER_ENUM.OPENAI]:
|
||||
'https://platform.openai.com/settings/organization/billing',
|
||||
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/plan',
|
||||
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/settings/billing',
|
||||
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/billing/',
|
||||
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/usage',
|
||||
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/credits',
|
||||
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/billing',
|
||||
[OCO_AI_PROVIDER_ENUM.AZURE]:
|
||||
'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
|
||||
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
|
||||
[OCO_AI_PROVIDER_ENUM.MLX]: null,
|
||||
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
|
||||
[OCO_AI_PROVIDER_ENUM.TEST]: null
|
||||
};
|
||||
|
||||
// Error type for insufficient credits/quota
|
||||
export class InsufficientCreditsError extends Error {
|
||||
public readonly provider: string;
|
||||
|
||||
constructor(provider: string, message?: string) {
|
||||
super(
|
||||
message || `Insufficient credits or quota for provider '${provider}'`
|
||||
);
|
||||
this.name = 'InsufficientCreditsError';
|
||||
this.provider = provider;
|
||||
}
|
||||
}
|
||||
|
||||
// Error type for rate limiting (429 errors)
|
||||
export class RateLimitError extends Error {
|
||||
public readonly provider: string;
|
||||
public readonly retryAfter?: number;
|
||||
|
||||
constructor(provider: string, retryAfter?: number, message?: string) {
|
||||
super(message || `Rate limit exceeded for provider '${provider}'`);
|
||||
this.name = 'RateLimitError';
|
||||
this.provider = provider;
|
||||
this.retryAfter = retryAfter;
|
||||
}
|
||||
}
|
||||
|
||||
// Error type for service unavailable (5xx errors)
|
||||
export class ServiceUnavailableError extends Error {
|
||||
public readonly provider: string;
|
||||
public readonly statusCode: number;
|
||||
|
||||
constructor(provider: string, statusCode: number = 503, message?: string) {
|
||||
super(message || `Service unavailable for provider '${provider}'`);
|
||||
this.name = 'ServiceUnavailableError';
|
||||
this.provider = provider;
|
||||
this.statusCode = statusCode;
|
||||
}
|
||||
}
|
||||
|
||||
// Error type for authentication failures
|
||||
export class AuthenticationError extends Error {
|
||||
public readonly provider: string;
|
||||
|
||||
constructor(provider: string, message?: string) {
|
||||
super(message || `Authentication failed for provider '${provider}'`);
|
||||
this.name = 'AuthenticationError';
|
||||
this.provider = provider;
|
||||
}
|
||||
}
|
||||
|
||||
export class ModelNotFoundError extends Error {
|
||||
public readonly modelName: string;
|
||||
public readonly provider: string;
|
||||
public readonly statusCode: number;
|
||||
|
||||
constructor(modelName: string, provider: string, statusCode: number = 404) {
|
||||
super(`Model '${modelName}' not found for provider '${provider}'`);
|
||||
this.name = 'ModelNotFoundError';
|
||||
this.modelName = modelName;
|
||||
this.provider = provider;
|
||||
this.statusCode = statusCode;
|
||||
}
|
||||
}
|
||||
|
||||
export class ApiKeyMissingError extends Error {
|
||||
public readonly provider: string;
|
||||
|
||||
constructor(provider: string) {
|
||||
super(`API key is missing for provider '${provider}'`);
|
||||
this.name = 'ApiKeyMissingError';
|
||||
this.provider = provider;
|
||||
}
|
||||
}
|
||||
|
||||
export function isModelNotFoundError(error: unknown): boolean {
|
||||
if (error instanceof ModelNotFoundError) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
const message = error.message.toLowerCase();
|
||||
|
||||
// OpenAI error patterns
|
||||
if (
|
||||
message.includes('model') &&
|
||||
(message.includes('not found') ||
|
||||
message.includes('does not exist') ||
|
||||
message.includes('invalid model'))
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Anthropic error patterns
|
||||
if (
|
||||
message.includes('model') &&
|
||||
(message.includes('not found') || message.includes('invalid'))
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for 404 status in axios/fetch errors
|
||||
if (
|
||||
'status' in (error as any) &&
|
||||
(error as any).status === 404 &&
|
||||
message.includes('model')
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for response status
|
||||
if ('response' in (error as any)) {
|
||||
const response = (error as any).response;
|
||||
if (response?.status === 404) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
export function isApiKeyError(error: unknown): boolean {
|
||||
if (error instanceof ApiKeyMissingError) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
const message = error.message.toLowerCase();
|
||||
|
||||
// Common API key error patterns
|
||||
if (
|
||||
message.includes('api key') ||
|
||||
message.includes('apikey') ||
|
||||
message.includes('authentication') ||
|
||||
message.includes('unauthorized') ||
|
||||
message.includes('invalid_api_key') ||
|
||||
message.includes('incorrect api key')
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for 401 status
|
||||
if ('response' in (error as any)) {
|
||||
const response = (error as any).response;
|
||||
if (response?.status === 401) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
export function getSuggestedModels(
|
||||
provider: string,
|
||||
failedModel: string
|
||||
): string[] {
|
||||
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
|
||||
const models = MODEL_LIST[providerKey];
|
||||
|
||||
if (!models || !Array.isArray(models)) {
|
||||
return [];
|
||||
}
|
||||
|
||||
// Return first 5 models as suggestions, excluding the failed one
|
||||
return models.filter((m) => m !== failedModel).slice(0, 5);
|
||||
}
|
||||
|
||||
export function getRecommendedModel(provider: string): string | null {
|
||||
switch (provider.toLowerCase()) {
|
||||
case OCO_AI_PROVIDER_ENUM.OPENAI:
|
||||
return 'gpt-4o-mini';
|
||||
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
|
||||
return 'claude-sonnet-4-20250514';
|
||||
case OCO_AI_PROVIDER_ENUM.GEMINI:
|
||||
return 'gemini-1.5-flash';
|
||||
case OCO_AI_PROVIDER_ENUM.GROQ:
|
||||
return 'llama3-70b-8192';
|
||||
case OCO_AI_PROVIDER_ENUM.MISTRAL:
|
||||
return 'mistral-small-latest';
|
||||
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
|
||||
return 'deepseek-chat';
|
||||
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
|
||||
return 'openai/gpt-4o-mini';
|
||||
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
|
||||
return 'gpt-4o-mini';
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function formatErrorWithRecovery(
|
||||
error: Error,
|
||||
provider: string,
|
||||
model: string
|
||||
): string {
|
||||
const suggestions = getSuggestedModels(provider, model);
|
||||
const recommended = getRecommendedModel(provider);
|
||||
|
||||
let message = `\n${error.message}\n`;
|
||||
|
||||
if (suggestions.length > 0) {
|
||||
message += '\nSuggested alternatives:\n';
|
||||
suggestions.forEach((m, i) => {
|
||||
const isRecommended = m === recommended;
|
||||
message += ` ${i + 1}. ${m}${isRecommended ? ' (Recommended)' : ''}\n`;
|
||||
});
|
||||
}
|
||||
|
||||
message += '\nTo fix this, run: oco config set OCO_MODEL=<model-name>\n';
|
||||
message += 'Or run: oco setup\n';
|
||||
|
||||
return message;
|
||||
}
|
||||
|
||||
// Detect insufficient credits/quota errors from various providers
|
||||
export function isInsufficientCreditsError(error: unknown): boolean {
|
||||
if (error instanceof InsufficientCreditsError) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
const message = error.message.toLowerCase();
|
||||
|
||||
// Common patterns for insufficient credits/quota
|
||||
if (
|
||||
message.includes('insufficient') ||
|
||||
message.includes('credit') ||
|
||||
message.includes('quota') ||
|
||||
message.includes('balance') ||
|
||||
message.includes('billing') ||
|
||||
message.includes('payment') ||
|
||||
message.includes('exceeded') ||
|
||||
message.includes('limit reached') ||
|
||||
message.includes('no remaining')
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for 402 Payment Required status
|
||||
if ('status' in (error as any) && (error as any).status === 402) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if ('response' in (error as any)) {
|
||||
const response = (error as any).response;
|
||||
if (response?.status === 402) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Detect rate limit errors (429)
|
||||
export function isRateLimitError(error: unknown): boolean {
|
||||
if (error instanceof RateLimitError) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
const message = error.message.toLowerCase();
|
||||
|
||||
// Common patterns for rate limiting
|
||||
if (
|
||||
message.includes('rate limit') ||
|
||||
message.includes('rate_limit') ||
|
||||
message.includes('too many requests') ||
|
||||
message.includes('throttle')
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for 429 status
|
||||
if ('status' in (error as any) && (error as any).status === 429) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if ('response' in (error as any)) {
|
||||
const response = (error as any).response;
|
||||
if (response?.status === 429) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Detect service unavailable errors (5xx)
|
||||
export function isServiceUnavailableError(error: unknown): boolean {
|
||||
if (error instanceof ServiceUnavailableError) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
const message = error.message.toLowerCase();
|
||||
|
||||
// Common patterns for service unavailable
|
||||
if (
|
||||
message.includes('service unavailable') ||
|
||||
message.includes('server error') ||
|
||||
message.includes('internal error') ||
|
||||
message.includes('temporarily unavailable') ||
|
||||
message.includes('overloaded')
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for 5xx status
|
||||
const status = (error as any).status || (error as any).response?.status;
|
||||
if (status && status >= 500 && status < 600) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// User-friendly formatted error structure produced by
// formatUserFriendlyError and rendered by printFormattedError.
export interface FormattedError {
  // Short headline, e.g. 'Rate Limit Exceeded'.
  title: string;
  // One-sentence human-readable description of the failure.
  message: string;
  // Link to the provider's billing/help page, or null when none applies.
  helpUrl: string | null;
  // Actionable next step for the user, or null when none applies.
  suggestion: string | null;
}

// Optional context used when formatting errors.
export interface ErrorFormattingContext {
  // Custom API endpoint in effect, if any; used to word outage messages
  // around the endpoint rather than the provider name.
  baseURL?: string;
}
|
||||
|
||||
function getCustomEndpointLabel(baseURL?: string): string | null {
|
||||
if (!baseURL) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
return new URL(baseURL).host;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function getServiceUnavailableMessage(
|
||||
provider: string,
|
||||
context?: ErrorFormattingContext
|
||||
): string {
|
||||
const endpointLabel = getCustomEndpointLabel(context?.baseURL);
|
||||
|
||||
if (endpointLabel) {
|
||||
return `The configured API endpoint (${endpointLabel}) is temporarily unavailable.`;
|
||||
}
|
||||
|
||||
if (context?.baseURL) {
|
||||
return 'The configured API endpoint is temporarily unavailable.';
|
||||
}
|
||||
|
||||
return `The ${provider} service is temporarily unavailable.`;
|
||||
}
|
||||
|
||||
// Format an error into a user-friendly structure.
//
// Classification runs in strict priority order: first our typed custom
// errors (exact), then heuristic detection on raw errors — credits,
// rate limit, outage, auth, missing model — and finally a generic
// fallback. NOTE(review): the heuristic order matters; a message that
// matches an earlier predicate wins even if a later one also matches.
export function formatUserFriendlyError(
  error: unknown,
  provider: string,
  context?: ErrorFormattingContext
): FormattedError {
  // Billing/help page for this provider; null for local providers.
  const billingUrl = PROVIDER_BILLING_URLS[provider] || null;

  // Handle our custom error types first
  if (error instanceof InsufficientCreditsError) {
    return {
      title: 'Insufficient Credits',
      message: `Your ${provider} account has insufficient credits or quota.`,
      helpUrl: billingUrl,
      suggestion: 'Add credits to your account to continue using the service.'
    };
  }

  if (error instanceof RateLimitError) {
    // Prefer the provider-supplied retry-after interval when present.
    const retryMsg = error.retryAfter
      ? `Please wait ${error.retryAfter} seconds before retrying.`
      : 'Please wait a moment before retrying.';
    return {
      title: 'Rate Limit Exceeded',
      message: `You've made too many requests to ${provider}.`,
      helpUrl: billingUrl,
      suggestion: retryMsg
    };
  }

  if (error instanceof ServiceUnavailableError) {
    return {
      title: 'Service Unavailable',
      message: getServiceUnavailableMessage(provider, context),
      helpUrl: null,
      suggestion: 'Please try again in a few moments.'
    };
  }

  if (error instanceof AuthenticationError) {
    return {
      title: 'Authentication Failed',
      message: `Your ${provider} API key is invalid or expired.`,
      helpUrl: billingUrl,
      suggestion: 'Run `oco setup` to configure a valid API key.'
    };
  }

  if (error instanceof ModelNotFoundError) {
    return {
      title: 'Model Not Found',
      message: `The model '${error.modelName}' is not available for ${provider}.`,
      helpUrl: null,
      suggestion: 'Run `oco setup` to select a valid model.'
    };
  }

  // Detect error type from raw errors
  if (isInsufficientCreditsError(error)) {
    return {
      title: 'Insufficient Credits',
      message: `Your ${provider} account has insufficient credits or quota.`,
      helpUrl: billingUrl,
      suggestion: 'Add credits to your account to continue using the service.'
    };
  }

  if (isRateLimitError(error)) {
    return {
      title: 'Rate Limit Exceeded',
      message: `You've made too many requests to ${provider}.`,
      helpUrl: billingUrl,
      suggestion: 'Please wait a moment before retrying.'
    };
  }

  if (isServiceUnavailableError(error)) {
    return {
      title: 'Service Unavailable',
      message: getServiceUnavailableMessage(provider, context),
      helpUrl: null,
      suggestion: 'Please try again in a few moments.'
    };
  }

  if (isApiKeyError(error)) {
    return {
      title: 'Authentication Failed',
      message: `Your ${provider} API key is invalid or expired.`,
      helpUrl: billingUrl,
      suggestion: 'Run `oco setup` to configure a valid API key.'
    };
  }

  if (isModelNotFoundError(error)) {
    // Raw errors may expose the failing model under either property name.
    const model = (error as any).modelName || (error as any).model || 'unknown';
    return {
      title: 'Model Not Found',
      message: `The model '${model}' is not available for ${provider}.`,
      helpUrl: null,
      suggestion: 'Run `oco setup` to select a valid model.'
    };
  }

  // Default: generic error
  const errorMessage = error instanceof Error ? error.message : String(error);
  return {
    title: 'Error',
    message: errorMessage,
    helpUrl: null,
    suggestion: 'Run `oco setup` to reconfigure or check your settings.'
  };
}
|
||||
|
||||
// Print a formatted error as a chalk-styled string
|
||||
export function printFormattedError(formatted: FormattedError): string {
|
||||
let output = `\n${chalk.red('✖')} ${chalk.bold.red(formatted.title)}\n`;
|
||||
output += ` ${formatted.message}\n`;
|
||||
|
||||
if (formatted.helpUrl) {
|
||||
output += `\n ${chalk.cyan('Help:')} ${chalk.underline(
|
||||
formatted.helpUrl
|
||||
)}\n`;
|
||||
}
|
||||
|
||||
if (formatted.suggestion) {
|
||||
output += `\n ${chalk.yellow('Suggestion:')} ${formatted.suggestion}\n`;
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
8
src/utils/generateCommitMessageErrors.ts
Normal file
8
src/utils/generateCommitMessageErrors.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { DEFAULT_TOKEN_LIMITS } from '../commands/config';

// Error identifiers surfaced while generating a commit message.
// NOTE(review): unlike the other members, outputTokensTooHigh is a full
// user-facing sentence (built from the configured default), not a code.
export enum GenerateCommitMessageErrorEnum {
  // The diff is too large for the model's context window.
  tooMuchTokens = 'TOO_MUCH_TOKENS',
  // Unexpected failure inside the generation pipeline.
  internalError = 'INTERNAL_ERROR',
  // The model returned an empty completion.
  emptyMessage = 'EMPTY_MESSAGE',
  outputTokensTooHigh = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT} tokens.`
}
|
||||
@@ -93,11 +93,7 @@ export const gitAdd = async ({ files }: { files: string[] }) => {
|
||||
gitAddSpinner.stop(`Staged ${files.length} files`);
|
||||
};
|
||||
|
||||
export const getDiff = async ({ files }: { files: string[] }) => {
|
||||
const gitDir = await getGitDir();
|
||||
|
||||
const lockFiles = files.filter(
|
||||
(file) =>
|
||||
const isFileExcludedFromDiff = (file: string) =>
|
||||
file.includes('.lock') ||
|
||||
file.includes('-lock.') ||
|
||||
file.includes('.svg') ||
|
||||
@@ -105,24 +101,26 @@ export const getDiff = async ({ files }: { files: string[] }) => {
|
||||
file.includes('.jpg') ||
|
||||
file.includes('.jpeg') ||
|
||||
file.includes('.webp') ||
|
||||
file.includes('.gif')
|
||||
);
|
||||
file.includes('.gif');
|
||||
|
||||
if (lockFiles.length) {
|
||||
export const getDiff = async ({ files }: { files: string[] }) => {
|
||||
const gitDir = await getGitDir();
|
||||
|
||||
const excludedFiles = files.filter(isFileExcludedFromDiff);
|
||||
|
||||
if (excludedFiles.length) {
|
||||
outro(
|
||||
`Some files are excluded by default from 'git diff'. No commit messages are generated for this files:\n${lockFiles.join(
|
||||
`Some files are excluded by default from 'git diff'. No commit messages are generated for this files:\n${excludedFiles.join(
|
||||
'\n'
|
||||
)}`
|
||||
);
|
||||
}
|
||||
|
||||
const filesWithoutLocks = files.filter(
|
||||
(file) => !file.includes('.lock') && !file.includes('-lock.')
|
||||
);
|
||||
const diffableFiles = files.filter((file) => !isFileExcludedFromDiff(file));
|
||||
|
||||
const { stdout: diff } = await execa(
|
||||
'git',
|
||||
['diff', '--staged', '--', ...filesWithoutLocks],
|
||||
['diff', '--staged', '--', ...diffableFiles],
|
||||
{ cwd: gitDir }
|
||||
);
|
||||
|
||||
|
||||
330
src/utils/modelCache.ts
Normal file
330
src/utils/modelCache.ts
Normal file
@@ -0,0 +1,330 @@
|
||||
import { existsSync, readFileSync, writeFileSync } from 'fs';
|
||||
import { homedir } from 'os';
|
||||
import { join as pathJoin } from 'path';
|
||||
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
|
||||
|
||||
// On-disk location of the model-list cache (user's home directory).
const MODEL_CACHE_PATH = pathJoin(homedir(), '.opencommit-models.json');
// Cached model lists are considered fresh for this long.
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000; // 7 days

// Shape of the JSON persisted at MODEL_CACHE_PATH.
interface ModelCache {
  // Epoch millis of the last cache write; used for TTL checks.
  timestamp: number;
  // Provider identifier -> list of model names fetched for it.
  models: Record<string, string[]>;
}
|
||||
|
||||
function readCache(): ModelCache | null {
|
||||
try {
|
||||
if (!existsSync(MODEL_CACHE_PATH)) {
|
||||
return null;
|
||||
}
|
||||
const data = readFileSync(MODEL_CACHE_PATH, 'utf8');
|
||||
return JSON.parse(data);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function writeCache(models: Record<string, string[]>): void {
|
||||
try {
|
||||
const cache: ModelCache = {
|
||||
timestamp: Date.now(),
|
||||
models
|
||||
};
|
||||
writeFileSync(MODEL_CACHE_PATH, JSON.stringify(cache, null, 2), 'utf8');
|
||||
} catch {
|
||||
// Silently fail if we can't write cache
|
||||
}
|
||||
}
|
||||
|
||||
function isCacheValid(cache: ModelCache | null): boolean {
|
||||
if (!cache) return false;
|
||||
return Date.now() - cache.timestamp < CACHE_TTL_MS;
|
||||
}
|
||||
|
||||
export async function fetchOpenAIModels(apiKey: string): Promise<string[]> {
|
||||
try {
|
||||
const response = await fetch('https://api.openai.com/v1/models', {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return MODEL_LIST.openai;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const models = data.data
|
||||
.map((m: { id: string }) => m.id)
|
||||
.filter(
|
||||
(id: string) =>
|
||||
id.startsWith('gpt-') ||
|
||||
id.startsWith('o1') ||
|
||||
id.startsWith('o3') ||
|
||||
id.startsWith('o4')
|
||||
)
|
||||
.sort();
|
||||
|
||||
return models.length > 0 ? models : MODEL_LIST.openai;
|
||||
} catch {
|
||||
return MODEL_LIST.openai;
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchOllamaModels(
|
||||
baseUrl: string = 'http://localhost:11434'
|
||||
): Promise<string[]> {
|
||||
try {
|
||||
const response = await fetch(`${baseUrl}/api/tags`);
|
||||
|
||||
if (!response.ok) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.models?.map((m: { name: string }) => m.name) || [];
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchAnthropicModels(apiKey: string): Promise<string[]> {
|
||||
try {
|
||||
const response = await fetch('https://api.anthropic.com/v1/models', {
|
||||
headers: {
|
||||
'x-api-key': apiKey,
|
||||
'anthropic-version': '2023-06-01'
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return MODEL_LIST.anthropic;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const models = data.data
|
||||
?.map((m: { id: string }) => m.id)
|
||||
.filter((id: string) => id.startsWith('claude-'))
|
||||
.sort();
|
||||
|
||||
return models && models.length > 0 ? models : MODEL_LIST.anthropic;
|
||||
} catch {
|
||||
return MODEL_LIST.anthropic;
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchMistralModels(apiKey: string): Promise<string[]> {
|
||||
try {
|
||||
const response = await fetch('https://api.mistral.ai/v1/models', {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return MODEL_LIST.mistral;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const models = data.data?.map((m: { id: string }) => m.id).sort();
|
||||
|
||||
return models && models.length > 0 ? models : MODEL_LIST.mistral;
|
||||
} catch {
|
||||
return MODEL_LIST.mistral;
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchGroqModels(apiKey: string): Promise<string[]> {
|
||||
try {
|
||||
const response = await fetch('https://api.groq.com/openai/v1/models', {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return MODEL_LIST.groq;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const models = data.data?.map((m: { id: string }) => m.id).sort();
|
||||
|
||||
return models && models.length > 0 ? models : MODEL_LIST.groq;
|
||||
} catch {
|
||||
return MODEL_LIST.groq;
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
|
||||
try {
|
||||
const response = await fetch('https://openrouter.ai/api/v1/models', {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return MODEL_LIST.openrouter;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
// Filter to text-capable models only (exclude image/audio models)
|
||||
const models = data.data
|
||||
?.filter(
|
||||
(m: { id: string; context_length?: number }) =>
|
||||
m.context_length && m.context_length > 0
|
||||
)
|
||||
.map((m: { id: string }) => m.id)
|
||||
.sort();
|
||||
|
||||
return models && models.length > 0 ? models : MODEL_LIST.openrouter;
|
||||
} catch {
|
||||
return MODEL_LIST.openrouter;
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
|
||||
try {
|
||||
const response = await fetch('https://api.deepseek.com/v1/models', {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return MODEL_LIST.deepseek;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const models = data.data?.map((m: { id: string }) => m.id).sort();
|
||||
|
||||
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
|
||||
} catch {
|
||||
return MODEL_LIST.deepseek;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Resolve the model list for a provider, preferring a fresh on-disk
 * cache, then a live API fetch (when an apiKey is available), then the
 * hardcoded MODEL_LIST. The result is written back to the cache.
 *
 * @param provider - Provider identifier (case-insensitive).
 * @param apiKey - Credential for providers that list models via API.
 * @param baseUrl - Only used for Ollama's local daemon.
 * @param forceRefresh - Skip the cache and re-fetch.
 */
export async function fetchModelsForProvider(
  provider: string,
  apiKey?: string,
  baseUrl?: string,
  forceRefresh: boolean = false
): Promise<string[]> {
  const cache = readCache();

  // Return cached models if valid (unless force refresh)
  if (!forceRefresh && isCacheValid(cache) && cache!.models[provider]) {
    return cache!.models[provider];
  }

  let models: string[] = [];

  switch (provider.toLowerCase()) {
    case OCO_AI_PROVIDER_ENUM.OPENAI:
      if (apiKey) {
        models = await fetchOpenAIModels(apiKey);
      } else {
        models = MODEL_LIST.openai;
      }
      break;

    case OCO_AI_PROVIDER_ENUM.OLLAMA:
      // Local daemon; no API key needed.
      models = await fetchOllamaModels(baseUrl);
      break;

    case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
      if (apiKey) {
        models = await fetchAnthropicModels(apiKey);
      } else {
        models = MODEL_LIST.anthropic;
      }
      break;

    case OCO_AI_PROVIDER_ENUM.GEMINI:
      // Google's API doesn't easily list generative models, use hardcoded list
      models = MODEL_LIST.gemini;
      break;

    case OCO_AI_PROVIDER_ENUM.GROQ:
      if (apiKey) {
        models = await fetchGroqModels(apiKey);
      } else {
        models = MODEL_LIST.groq;
      }
      break;

    case OCO_AI_PROVIDER_ENUM.MISTRAL:
      if (apiKey) {
        models = await fetchMistralModels(apiKey);
      } else {
        models = MODEL_LIST.mistral;
      }
      break;

    case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
      if (apiKey) {
        models = await fetchDeepSeekModels(apiKey);
      } else {
        models = MODEL_LIST.deepseek;
      }
      break;

    case OCO_AI_PROVIDER_ENUM.AIMLAPI:
      models = MODEL_LIST.aimlapi;
      break;

    case OCO_AI_PROVIDER_ENUM.OPENROUTER:
      if (apiKey) {
        models = await fetchOpenRouterModels(apiKey);
      } else {
        models = MODEL_LIST.openrouter;
      }
      break;

    default:
      // NOTE(review): unknown providers fall back to the OpenAI list —
      // confirm this is intended rather than returning [].
      models = MODEL_LIST.openai;
  }

  // Update cache
  const existingCache = cache?.models || {};
  existingCache[provider] = models;
  writeCache(existingCache);

  return models;
}
|
||||
|
||||
export function getModelsForProvider(provider: string): string[] {
|
||||
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
|
||||
return MODEL_LIST[providerKey] || MODEL_LIST.openai;
|
||||
}
|
||||
|
||||
export function clearModelCache(): void {
|
||||
try {
|
||||
if (existsSync(MODEL_CACHE_PATH)) {
|
||||
writeFileSync(MODEL_CACHE_PATH, '{}', 'utf8');
|
||||
}
|
||||
} catch {
|
||||
// Silently fail
|
||||
}
|
||||
}
|
||||
|
||||
export function getCacheInfo(): {
|
||||
timestamp: number | null;
|
||||
providers: string[];
|
||||
} {
|
||||
const cache = readCache();
|
||||
if (!cache) {
|
||||
return { timestamp: null, providers: [] };
|
||||
}
|
||||
return {
|
||||
timestamp: cache.timestamp,
|
||||
providers: Object.keys(cache.models || {})
|
||||
};
|
||||
}
|
||||
|
||||
export function getCachedModels(provider: string): string[] | null {
|
||||
const cache = readCache();
|
||||
if (!cache || !cache.models[provider]) {
|
||||
return null;
|
||||
}
|
||||
return cache.models[provider];
|
||||
}
|
||||
52
src/utils/proxy.ts
Normal file
52
src/utils/proxy.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import axios from 'axios';
|
||||
import { HttpsProxyAgent } from 'https-proxy-agent';
|
||||
import { Agent, ProxyAgent, setGlobalDispatcher } from 'undici';
|
||||
|
||||
export type ProxySetting = string | null | undefined;
|
||||
|
||||
export function resolveProxy(proxySetting?: ProxySetting): ProxySetting {
|
||||
if (proxySetting === null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (typeof proxySetting === 'string' && proxySetting.trim().length > 0) {
|
||||
return proxySetting;
|
||||
}
|
||||
|
||||
return process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
|
||||
}
|
||||
|
||||
function resetProxySetup(disableEnvProxy: boolean) {
|
||||
setGlobalDispatcher(new Agent());
|
||||
axios.defaults.httpAgent = undefined;
|
||||
axios.defaults.httpsAgent = undefined;
|
||||
axios.defaults.proxy = disableEnvProxy ? false : undefined;
|
||||
}
|
||||
|
||||
export function setupProxy(proxySetting?: ProxySetting) {
|
||||
try {
|
||||
if (proxySetting === null) {
|
||||
resetProxySetup(true);
|
||||
return;
|
||||
}
|
||||
|
||||
resetProxySetup(false);
|
||||
|
||||
if (!proxySetting) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Set global dispatcher for undici (affects globalThis.fetch used by Gemini and others)
|
||||
const dispatcher = new ProxyAgent(proxySetting);
|
||||
setGlobalDispatcher(dispatcher);
|
||||
|
||||
// Set axios global agents and disable axios built-in proxy handling.
|
||||
const agent = new HttpsProxyAgent(proxySetting);
|
||||
axios.defaults.httpAgent = agent;
|
||||
axios.defaults.httpsAgent = agent;
|
||||
axios.defaults.proxy = false;
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
console.warn(`[Proxy Error] Failed to set proxy: ${message}`);
|
||||
}
|
||||
}
|
||||
757
test/e2e/cliBehavior.test.ts
Normal file
757
test/e2e/cliBehavior.test.ts
Normal file
@@ -0,0 +1,757 @@
|
||||
import {
|
||||
existsSync,
|
||||
lstatSync,
|
||||
readFileSync,
|
||||
realpathSync,
|
||||
rmSync,
|
||||
writeFileSync
|
||||
} from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import {
|
||||
assertGitStatus,
|
||||
assertHeadCommit,
|
||||
getHeadCommitFiles,
|
||||
getMockOpenAiEnv,
|
||||
prepareEnvironment,
|
||||
prepareRepo,
|
||||
prepareTempDir,
|
||||
runCli,
|
||||
runGit,
|
||||
runProcess,
|
||||
seedMigrations,
|
||||
seedModelCache,
|
||||
startMockOpenAiServer,
|
||||
waitForExit,
|
||||
writeGlobalConfig,
|
||||
writeRepoFile
|
||||
} from './utils';
|
||||
|
||||
// E2E: --context must be injected into the model prompt and --yes must
// suppress both the commit and push confirmations.
it('cli flow passes --context through to the model prompt and skips confirmation with --yes', async () => {
  const { gitDir, cleanup } = await prepareEnvironment();
  const server = await startMockOpenAiServer(
    'fix(context): handle production incident'
  );

  try {
    await prepareRepo(
      gitDir,
      {
        'index.ts': 'console.log("Hello World");\n'
      },
      { stage: true }
    );

    const oco = await runCli(['--yes', '--context=production-incident'], {
      cwd: gitDir,
      env: getMockOpenAiEnv(server.baseUrl)
    });

    // --yes: neither confirmation prompt may appear.
    expect(
      await oco.queryByText('Confirm the commit message?')
    ).not.toBeInTheConsole();
    expect(
      await oco.queryByText('Do you want to run `git push`?')
    ).not.toBeInTheConsole();
    expect(await waitForExit(oco)).toBe(0);
    await assertHeadCommit(gitDir, 'fix(context): handle production incident');

    // Inspect the last request the CLI sent to the mock model server.
    const requestPayload = server.requestBodies[
      server.requestBodies.length - 1
    ] as { messages: Array<{ content: string }> };
    const requestContents = requestPayload.messages
      .map((message) => message.content)
      .join('\n');

    expect(requestContents).toContain('<context>production-incident</context>');
    expect(requestContents).toContain('console.log("Hello World");');
    expect(server.authHeaders).toContain('Bearer test-openai-key');
  } finally {
    await server.cleanup();
    await cleanup();
  }
});

// E2E: --fgm must switch the prompt to the full GitMoji specification.
it('cli flow passes --fgm through to the full GitMoji prompt', async () => {
  const { gitDir, cleanup } = await prepareEnvironment();
  const server = await startMockOpenAiServer(
    'feat(fgm): use the extended gitmoji specification'
  );

  try {
    await prepareRepo(
      gitDir,
      {
        'index.ts': 'console.log("Hello World");\n'
      },
      { stage: true }
    );

    const oco = await runCli(['--fgm', '--yes'], {
      cwd: gitDir,
      env: getMockOpenAiEnv(server.baseUrl)
    });

    expect(await waitForExit(oco)).toBe(0);
    await assertHeadCommit(
      gitDir,
      'feat(fgm): use the extended gitmoji specification'
    );

    // The prompt sent to the model must include the extended gitmoji text.
    const requestPayload = server.requestBodies[
      server.requestBodies.length - 1
    ] as { messages: Array<{ content: string }> };
    const requestContents = requestPayload.messages
      .map((message) => message.content)
      .join('\n');

    expect(requestContents).toContain(
      '🎨, Improve structure / format of the code;'
    );
    expect(requestContents).toContain('GitMoji specification');
  } finally {
    await server.cleanup();
    await cleanup();
  }
});

// E2E: choosing the "edit" option in the confirmation prompt lets the
// user append text to the generated message before committing.
it('cli flow allows editing the generated commit message before committing', async () => {
  const { gitDir, cleanup } = await prepareEnvironment();
  const server = await startMockOpenAiServer(
    'fix(cli): allow editing the generated message'
  );

  try {
    await prepareRepo(
      gitDir,
      {
        'index.ts': 'console.log("Hello World");\n'
      },
      { stage: true }
    );

    const oco = await runCli([], {
      cwd: gitDir,
      env: getMockOpenAiEnv(server.baseUrl)
    });

    expect(
      await oco.findByText('Confirm the commit message?')
    ).toBeInTheConsole();
    // Navigate to the "edit" choice (two arrow-downs) and select it.
    oco.userEvent.keyboard('[ArrowDown][ArrowDown][Enter]');

    expect(
      await oco.findByText(
        'Please edit the commit message: (press Enter to continue)'
      )
    ).toBeInTheConsole();
    // Append text to the generated message, then submit.
    oco.userEvent.keyboard(' before commit[Enter]');

    expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
    expect(await waitForExit(oco)).toBe(0);
    await assertHeadCommit(
      gitDir,
      'fix(cli): allow editing the generated message before commit'
    );
  } finally {
    await server.cleanup();
    await cleanup();
  }
});

// E2E: rejecting the first suggestion and asking to regenerate must
// trigger a second model request and commit the second message.
it('cli flow regenerates the message when the user rejects the first suggestion', async () => {
  const { gitDir, cleanup } = await prepareEnvironment();
  // Mock server returns a different message for the second request.
  const server = await startMockOpenAiServer(({ requestIndex }) => ({
    body: {
      choices: [
        {
          message: {
            content:
              requestIndex === 0
                ? 'fix(cli): first generated message'
                : 'fix(cli): regenerated message after retry'
          }
        }
      ]
    }
  }));

  try {
    await prepareRepo(
      gitDir,
      {
        'index.ts': 'console.log("Hello World");\n'
      },
      { stage: true }
    );

    const oco = await runCli([], {
      cwd: gitDir,
      env: getMockOpenAiEnv(server.baseUrl)
    });

    expect(
      await oco.findByText('Confirm the commit message?')
    ).toBeInTheConsole();
    // Reject the first suggestion.
    oco.userEvent.keyboard('[ArrowDown][Enter]');

    expect(
      await oco.findByText('Do you want to regenerate the message?')
    ).toBeInTheConsole();
    oco.userEvent.keyboard('[Enter]');

    // Clear captured output so the next findByText matches fresh text.
    oco.clear();
    expect(
      await oco.findByText('fix(cli): regenerated message after retry')
    ).toBeInTheConsole();
    expect(
      await oco.findByText('Confirm the commit message?')
    ).toBeInTheConsole();
    oco.userEvent.keyboard('[Enter]');

    expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
    expect(await waitForExit(oco)).toBe(0);
    await assertHeadCommit(gitDir, 'fix(cli): regenerated message after retry');
    expect(server.requestBodies).toHaveLength(2);
  } finally {
    await server.cleanup();
    await cleanup();
  }
});
|
||||
|
||||
it('cli flow lets the user select only specific unstaged files', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(cli): commit only the selected files'
|
||||
);
|
||||
|
||||
try {
|
||||
await prepareRepo(gitDir, {
|
||||
'alpha.ts': 'console.log("alpha");\n',
|
||||
'beta.ts': 'console.log("beta");\n'
|
||||
});
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl)
|
||||
});
|
||||
|
||||
expect(await oco.findByText('No files are staged')).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText(
|
||||
'Do you want to stage all files and generate commit message?'
|
||||
)
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[ArrowDown][Enter]');
|
||||
|
||||
expect(
|
||||
await oco.findByText('Select the files you want to add to the commit:')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Space][Enter]');
|
||||
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
expect(await getHeadCommitFiles(gitDir)).toEqual(['alpha.ts']);
|
||||
await assertGitStatus(gitDir, '?? beta.ts');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('cli applies the documented message template placeholder from extra args', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const server = await startMockOpenAiServer(
|
||||
'feat(template): keep generated subject'
|
||||
);
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli(["'$msg #205'"], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl)
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'feat(template): keep generated subject #205'
|
||||
);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('hook command sets and unsets the prepare-commit-msg symlink', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const hookPath = resolve(gitDir, '.git/hooks/prepare-commit-msg');
|
||||
const cliPath = resolve('./out/cli.cjs');
|
||||
|
||||
try {
|
||||
const setHook = await runCli(['hook', 'set'], {
|
||||
cwd: gitDir
|
||||
});
|
||||
|
||||
expect(await setHook.findByText('Hook set')).toBeInTheConsole();
|
||||
expect(await waitForExit(setHook)).toBe(0);
|
||||
expect(existsSync(hookPath)).toBe(true);
|
||||
expect(lstatSync(hookPath).isSymbolicLink()).toBe(true);
|
||||
expect(realpathSync(hookPath)).toBe(cliPath);
|
||||
|
||||
const unsetHook = await runCli(['hook', 'unset'], {
|
||||
cwd: gitDir
|
||||
});
|
||||
|
||||
expect(await unsetHook.findByText('Hook is removed')).toBeInTheConsole();
|
||||
expect(await waitForExit(unsetHook)).toBe(0);
|
||||
expect(existsSync(hookPath)).toBe(false);
|
||||
} finally {
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('prepare-commit-msg hook writes the generated message into the commit message file', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(hook): populate the commit message file'
|
||||
);
|
||||
const hookPath = resolve(gitDir, '.git/hooks/prepare-commit-msg');
|
||||
const messageFile = resolve(gitDir, '.git/COMMIT_EDITMSG');
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const setHook = await runCli(['hook', 'set'], {
|
||||
cwd: gitDir
|
||||
});
|
||||
expect(await setHook.findByText('Hook set')).toBeInTheConsole();
|
||||
expect(await waitForExit(setHook)).toBe(0);
|
||||
|
||||
writeFileSync(messageFile, '# existing\n');
|
||||
|
||||
const hookRun = await runProcess(hookPath, [messageFile], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl)
|
||||
});
|
||||
|
||||
expect(await hookRun.findByText('Done')).toBeInTheConsole();
|
||||
expect(await waitForExit(hookRun)).toBe(0);
|
||||
|
||||
const commitMessage = readFileSync(messageFile, 'utf8');
|
||||
expect(commitMessage).toContain(
|
||||
'# fix(hook): populate the commit message file'
|
||||
);
|
||||
expect(commitMessage).toContain('# ---------- [OpenCommit] ---------- #');
|
||||
expect(commitMessage).toContain('# existing');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('cli flow prompts for a missing API key, saves it, and completes the commit', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const homeDir = await prepareTempDir();
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(api): recovered after prompt'
|
||||
);
|
||||
|
||||
try {
|
||||
const configPath = writeGlobalConfig(homeDir, [
|
||||
'OCO_AI_PROVIDER=openai',
|
||||
'OCO_MODEL=gpt-4o-mini',
|
||||
`OCO_API_URL=${server.baseUrl}`,
|
||||
'OCO_GITPUSH=false'
|
||||
]);
|
||||
seedMigrations(homeDir);
|
||||
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: {
|
||||
HOME: homeDir
|
||||
}
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText("API key missing for openai. Let's set it up.")
|
||||
).toBeInTheConsole();
|
||||
expect(await oco.findByText('Enter your API key:')).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('test-openai-key[Enter]');
|
||||
|
||||
expect(await oco.findByText('API key saved')).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(gitDir, 'fix(api): recovered after prompt');
|
||||
expect(server.authHeaders).toContain('Bearer test-openai-key');
|
||||
expect(readFileSync(configPath, 'utf8')).toContain(
|
||||
'OCO_API_KEY=test-openai-key'
|
||||
);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
rmSync(homeDir, { force: true, recursive: true });
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('cli ignores files listed in .opencommitignore when they are the only staged changes', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'.opencommitignore': 'ignored.ts\n'
|
||||
},
|
||||
{
|
||||
stage: true,
|
||||
commitMessage: 'add opencommit ignore'
|
||||
}
|
||||
);
|
||||
|
||||
writeRepoFile(gitDir, 'ignored.ts', 'console.log("ignored");\n');
|
||||
await runGit(['add', 'ignored.ts'], gitDir);
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: {
|
||||
OCO_AI_PROVIDER: 'openai',
|
||||
OCO_API_KEY: 'dummy-openai-key',
|
||||
OCO_GITPUSH: 'false'
|
||||
}
|
||||
});
|
||||
|
||||
expect(await oco.findByText('No changes detected')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(1);
|
||||
await assertHeadCommit(gitDir, 'add opencommit ignore');
|
||||
} finally {
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('cli excludes .opencommitignore files from the generated prompt while still committing staged changes', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(ignore): keep only relevant diff context'
|
||||
);
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'.opencommitignore': 'ignored.ts\n'
|
||||
},
|
||||
{
|
||||
stage: true,
|
||||
commitMessage: 'add opencommit ignore'
|
||||
}
|
||||
);
|
||||
|
||||
writeRepoFile(gitDir, 'kept.ts', 'console.log("kept");\n');
|
||||
writeRepoFile(gitDir, 'ignored.ts', 'console.log("ignored");\n');
|
||||
await runGit(['add', 'kept.ts', 'ignored.ts'], gitDir);
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl)
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
|
||||
const requestPayload = server.requestBodies[
|
||||
server.requestBodies.length - 1
|
||||
] as { messages: Array<{ content: string }> };
|
||||
const requestContents = requestPayload.messages
|
||||
.map((message) => message.content)
|
||||
.join('\n');
|
||||
|
||||
expect(requestContents).toContain('kept.ts');
|
||||
expect(requestContents).toContain('console.log("kept");');
|
||||
expect(requestContents).not.toContain('ignored.ts');
|
||||
expect(requestContents).not.toContain('console.log("ignored");');
|
||||
expect(await getHeadCommitFiles(gitDir)).toEqual(['ignored.ts', 'kept.ts']);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('first run launches setup, saves config, and completes a commit with the configured provider', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const homeDir = await prepareTempDir();
|
||||
const server = await startMockOpenAiServer(
|
||||
'feat(setup): finish first run successfully'
|
||||
);
|
||||
|
||||
try {
|
||||
const configPath = resolve(homeDir, '.opencommit');
|
||||
|
||||
await seedModelCache(homeDir, {
|
||||
openai: ['gpt-4o-mini', 'gpt-4o']
|
||||
});
|
||||
seedMigrations(homeDir);
|
||||
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: {
|
||||
HOME: homeDir,
|
||||
OCO_API_URL: server.baseUrl,
|
||||
OCO_GITPUSH: 'false'
|
||||
}
|
||||
});
|
||||
|
||||
expect(await oco.findByText('Select your AI provider:')).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Enter your API key:')).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('first-run-openai-key[Enter]');
|
||||
|
||||
expect(await oco.findByText('Select a model:')).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(
|
||||
await oco.findByText('Configuration saved to ~/.opencommit')
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'feat(setup): finish first run successfully'
|
||||
);
|
||||
expect(readFileSync(configPath, 'utf8')).toContain(
|
||||
'OCO_AI_PROVIDER=openai'
|
||||
);
|
||||
expect(readFileSync(configPath, 'utf8')).toContain(
|
||||
'OCO_API_KEY=first-run-openai-key'
|
||||
);
|
||||
expect(readFileSync(configPath, 'utf8')).toContain('OCO_MODEL=gpt-4o-mini');
|
||||
expect(server.authHeaders).toContain('Bearer first-run-openai-key');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
rmSync(homeDir, { force: true, recursive: true });
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('cli recovers from a missing model by prompting for an alternative and retrying', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const homeDir = await prepareTempDir();
|
||||
const server = await startMockOpenAiServer(({ requestIndex, body }) => {
|
||||
if (requestIndex === 0) {
|
||||
return {
|
||||
status: 404,
|
||||
body: {
|
||||
error: {
|
||||
message: `The model '${body?.model}' does not exist`,
|
||||
type: 'invalid_request_error',
|
||||
code: 'model_not_found'
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
body: {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'fix(model): recover from invalid default model'
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
try {
|
||||
const configPath = writeGlobalConfig(homeDir, [
|
||||
'OCO_AI_PROVIDER=openai',
|
||||
'OCO_API_KEY=test-openai-key',
|
||||
'OCO_MODEL=missing-model',
|
||||
`OCO_API_URL=${server.baseUrl}`,
|
||||
'OCO_GITPUSH=false'
|
||||
]);
|
||||
seedMigrations(homeDir);
|
||||
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: {
|
||||
HOME: homeDir
|
||||
}
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText("Model 'missing-model' not found")
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText('Select an alternative model:')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Save as default model?')).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Model saved as default')).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'fix(model): recover from invalid default model'
|
||||
);
|
||||
expect(server.requestBodies.map((request) => request.model)).toEqual([
|
||||
'missing-model',
|
||||
'gpt-4o-mini'
|
||||
]);
|
||||
expect(readFileSync(configPath, 'utf8')).toContain('OCO_MODEL=gpt-4o-mini');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
rmSync(homeDir, { force: true, recursive: true });
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('cli excludes lockfiles and assets from the generated prompt while still committing them', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(diff): focus prompt on meaningful source changes'
|
||||
);
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'kept.ts': 'console.log("kept");\n',
|
||||
'package-lock.json': '{"name":"opencommit","lockfileVersion":3}\n',
|
||||
'logo.svg':
|
||||
'<svg viewBox="0 0 1 1"><rect width="1" height="1" /></svg>\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl)
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await oco.findByText('Successfully committed')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
|
||||
const requestPayload = server.requestBodies[
|
||||
server.requestBodies.length - 1
|
||||
] as { messages: Array<{ content: string }> };
|
||||
const requestContents = requestPayload.messages
|
||||
.map((message) => message.content)
|
||||
.join('\n');
|
||||
|
||||
expect(requestContents).toContain('kept.ts');
|
||||
expect(requestContents).toContain('console.log("kept");');
|
||||
expect(requestContents).not.toContain('package-lock.json');
|
||||
expect(requestContents).not.toContain('lockfileVersion');
|
||||
expect(requestContents).not.toContain('logo.svg');
|
||||
expect(requestContents).not.toContain('<svg');
|
||||
expect(await getHeadCommitFiles(gitDir)).toEqual([
|
||||
'kept.ts',
|
||||
'logo.svg',
|
||||
'package-lock.json'
|
||||
]);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('fails with a non-zero exit code outside a git repository', async () => {
|
||||
const tempDir = await prepareTempDir();
|
||||
|
||||
try {
|
||||
const oco = await runCli([], {
|
||||
cwd: tempDir,
|
||||
env: {
|
||||
OCO_AI_PROVIDER: 'openai',
|
||||
OCO_API_KEY: 'dummy-openai-key',
|
||||
OCO_GITPUSH: 'false'
|
||||
}
|
||||
});
|
||||
|
||||
expect(await waitForExit(oco)).toBe(1);
|
||||
expect(oco.getStdallStr()).toMatch(
|
||||
/No changes detected|not a git repository/
|
||||
);
|
||||
} finally {
|
||||
rmSync(tempDir, { force: true, recursive: true });
|
||||
}
|
||||
});
|
||||
108
test/e2e/geminiBehavior.test.ts
Normal file
108
test/e2e/geminiBehavior.test.ts
Normal file
@@ -0,0 +1,108 @@
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import {
|
||||
assertHeadCommit,
|
||||
getHeadCommitMessage,
|
||||
getMockGeminiEnv,
|
||||
prepareEnvironment,
|
||||
prepareRepo,
|
||||
runCli,
|
||||
startMockGeminiServer,
|
||||
waitForExit
|
||||
} from './utils';
|
||||
|
||||
it('built CLI ignores Gemini executable code parts when creating the commit message', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment({ remotes: 0 });
|
||||
const server = await startMockGeminiServer({
|
||||
candidates: [
|
||||
{
|
||||
index: 0,
|
||||
content: {
|
||||
role: 'model',
|
||||
parts: [
|
||||
{ text: 'feat(gemini): keep text output only' },
|
||||
{
|
||||
executableCode: {
|
||||
language: 'python',
|
||||
code: 'print("debug")'
|
||||
}
|
||||
},
|
||||
{
|
||||
codeExecutionResult: {
|
||||
outcome: 'outcome_ok',
|
||||
output: 'debug'
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
finishReason: 'STOP'
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockGeminiEnv(server.baseUrl)
|
||||
});
|
||||
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(gitDir, 'feat(gemini): keep text output only');
|
||||
expect(await getHeadCommitMessage(gitDir)).toBe(
|
||||
'feat(gemini): keep text output only'
|
||||
);
|
||||
expect(server.apiKeys).toContain('test-gemini-key');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('built CLI surfaces Gemini LANGUAGE finish reasons as errors', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment({ remotes: 0 });
|
||||
const server = await startMockGeminiServer({
|
||||
candidates: [
|
||||
{
|
||||
index: 0,
|
||||
content: {
|
||||
role: 'model',
|
||||
parts: [{ text: 'feat(gemini): should not commit' }]
|
||||
},
|
||||
finishReason: 'LANGUAGE',
|
||||
finishMessage: 'Unsupported language'
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockGeminiEnv(server.baseUrl)
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText(
|
||||
'Gemini response was blocked due to LANGUAGE: Unsupported language'
|
||||
)
|
||||
).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(1);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
@@ -1,205 +1,216 @@
|
||||
import path from 'path';
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import { exec } from 'child_process';
|
||||
import { prepareTempDir } from './utils';
|
||||
import { promisify } from 'util';
|
||||
import { render } from 'cli-testing-library';
|
||||
import { resolve } from 'path';
|
||||
import { rm } from 'fs';
|
||||
const fsExec = promisify(exec);
|
||||
const fsRemove = promisify(rm);
|
||||
|
||||
/**
|
||||
* git remote -v
|
||||
*
|
||||
* [no remotes]
|
||||
*/
|
||||
const prepareNoRemoteGitRepository = async (): Promise<{
|
||||
gitDir: string;
|
||||
cleanup: () => Promise<void>;
|
||||
}> => {
|
||||
const tempDir = await prepareTempDir();
|
||||
await fsExec('git init test', { cwd: tempDir });
|
||||
const gitDir = path.resolve(tempDir, 'test');
|
||||
|
||||
const cleanup = async () => {
|
||||
return fsRemove(tempDir, { recursive: true });
|
||||
};
|
||||
return {
|
||||
gitDir,
|
||||
cleanup
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* git remote -v
|
||||
*
|
||||
* origin /tmp/remote.git (fetch)
|
||||
* origin /tmp/remote.git (push)
|
||||
*/
|
||||
const prepareOneRemoteGitRepository = async (): Promise<{
|
||||
gitDir: string;
|
||||
cleanup: () => Promise<void>;
|
||||
}> => {
|
||||
const tempDir = await prepareTempDir();
|
||||
await fsExec('git init --bare remote.git', { cwd: tempDir });
|
||||
await fsExec('git clone remote.git test', { cwd: tempDir });
|
||||
const gitDir = path.resolve(tempDir, 'test');
|
||||
|
||||
const cleanup = async () => {
|
||||
return fsRemove(tempDir, { recursive: true });
|
||||
};
|
||||
return {
|
||||
gitDir,
|
||||
cleanup
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* git remote -v
|
||||
*
|
||||
* origin /tmp/remote.git (fetch)
|
||||
* origin /tmp/remote.git (push)
|
||||
* other ../remote2.git (fetch)
|
||||
* other ../remote2.git (push)
|
||||
*/
|
||||
const prepareTwoRemotesGitRepository = async (): Promise<{
|
||||
gitDir: string;
|
||||
cleanup: () => Promise<void>;
|
||||
}> => {
|
||||
const tempDir = await prepareTempDir();
|
||||
await fsExec('git init --bare remote.git', { cwd: tempDir });
|
||||
await fsExec('git init --bare other.git', { cwd: tempDir });
|
||||
await fsExec('git clone remote.git test', { cwd: tempDir });
|
||||
const gitDir = path.resolve(tempDir, 'test');
|
||||
await fsExec('git remote add other ../other.git', { cwd: gitDir });
|
||||
|
||||
const cleanup = async () => {
|
||||
return fsRemove(tempDir, { recursive: true });
|
||||
};
|
||||
return {
|
||||
gitDir,
|
||||
cleanup
|
||||
};
|
||||
};
|
||||
import {
|
||||
assertHeadCommit,
|
||||
getCurrentBranchName,
|
||||
getMockOpenAiEnv,
|
||||
getRemoteBranchHeadSubject,
|
||||
prepareEnvironment,
|
||||
prepareRepo,
|
||||
remoteBranchExists,
|
||||
runCli,
|
||||
startMockOpenAiServer,
|
||||
waitForExit
|
||||
} from './utils';
|
||||
|
||||
describe('cli flow to push git branch', () => {
|
||||
it('do nothing when OCO_GITPUSH is set to false', async () => {
|
||||
const { gitDir, cleanup } = await prepareNoRemoteGitRepository();
|
||||
|
||||
await render('echo', [`'console.log("Hello World");' > index.ts`], {
|
||||
cwd: gitDir
|
||||
});
|
||||
await render('git', ['add index.ts'], { cwd: gitDir });
|
||||
|
||||
const { queryByText, findByText, userEvent } = await render(
|
||||
`OCO_AI_PROVIDER='test' OCO_GITPUSH='false' node`,
|
||||
[resolve('./out/cli.cjs')],
|
||||
{ cwd: gitDir }
|
||||
it('does nothing when OCO_GITPUSH is set to false', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment({ remotes: 0 });
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(push): keep the commit local when push is disabled'
|
||||
);
|
||||
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(
|
||||
await queryByText('Choose a remote to push to')
|
||||
).not.toBeInTheConsole();
|
||||
expect(
|
||||
await queryByText('Do you want to run `git push`?')
|
||||
).not.toBeInTheConsole();
|
||||
expect(
|
||||
await queryByText('Successfully pushed all commits to origin')
|
||||
).not.toBeInTheConsole();
|
||||
expect(
|
||||
await queryByText('Command failed with exit code 1')
|
||||
).not.toBeInTheConsole();
|
||||
|
||||
await cleanup();
|
||||
});
|
||||
|
||||
it('push and cause error when there is no remote', async () => {
|
||||
const { gitDir, cleanup } = await prepareNoRemoteGitRepository();
|
||||
|
||||
await render('echo', [`'console.log("Hello World");' > index.ts`], {
|
||||
cwd: gitDir
|
||||
});
|
||||
await render('git', ['add index.ts'], { cwd: gitDir });
|
||||
|
||||
const { queryByText, findByText, userEvent } = await render(
|
||||
`OCO_AI_PROVIDER='test' OCO_GITPUSH='true' node`,
|
||||
[resolve('./out/cli.cjs')],
|
||||
{ cwd: gitDir }
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(
|
||||
await queryByText('Choose a remote to push to')
|
||||
).not.toBeInTheConsole();
|
||||
expect(
|
||||
await queryByText('Do you want to run `git push`?')
|
||||
).not.toBeInTheConsole();
|
||||
expect(
|
||||
await queryByText('Successfully pushed all commits to origin')
|
||||
).not.toBeInTheConsole();
|
||||
|
||||
expect(
|
||||
await findByText('Command failed with exit code 1')
|
||||
).toBeInTheConsole();
|
||||
|
||||
await cleanup();
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl, {
|
||||
OCO_GITPUSH: 'false'
|
||||
})
|
||||
});
|
||||
|
||||
it('push when one remote is set', async () => {
|
||||
const { gitDir, cleanup } = await prepareOneRemoteGitRepository();
|
||||
|
||||
await render('echo', [`'console.log("Hello World");' > index.ts`], {
|
||||
cwd: gitDir
|
||||
});
|
||||
await render('git', ['add index.ts'], { cwd: gitDir });
|
||||
|
||||
const { findByText, userEvent } = await render(
|
||||
`OCO_AI_PROVIDER='test' OCO_GITPUSH='true' node`,
|
||||
[resolve('./out/cli.cjs')],
|
||||
{ cwd: gitDir }
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'fix(push): keep the commit local when push is disabled'
|
||||
);
|
||||
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(
|
||||
await findByText('Do you want to run `git push`?')
|
||||
).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(
|
||||
await findByText('Successfully pushed all commits to origin')
|
||||
).toBeInTheConsole();
|
||||
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('push when two remotes are set', async () => {
|
||||
const { gitDir, cleanup } = await prepareTwoRemotesGitRepository();
|
||||
|
||||
await render('echo', [`'console.log("Hello World");' > index.ts`], {
|
||||
cwd: gitDir
|
||||
});
|
||||
await render('git', ['add index.ts'], { cwd: gitDir });
|
||||
|
||||
const { findByText, userEvent } = await render(
|
||||
`OCO_AI_PROVIDER='test' OCO_GITPUSH='true' node`,
|
||||
[resolve('./out/cli.cjs')],
|
||||
{ cwd: gitDir }
|
||||
it('fails after committing when push is enabled but there is no remote', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment({ remotes: 0 });
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(push): commit even when the push later fails'
|
||||
);
|
||||
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await findByText('Choose a remote to push to')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl, {
|
||||
OCO_GITPUSH: 'true'
|
||||
})
|
||||
});
|
||||
|
||||
expect(await waitForExit(oco)).toBe(1);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'fix(push): commit even when the push later fails'
|
||||
);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('pushes to the only configured remote', async () => {
|
||||
const { gitDir, remoteDir, cleanup } = await prepareEnvironment({
|
||||
remotes: 1
|
||||
});
|
||||
const server = await startMockOpenAiServer(
|
||||
'feat(push): publish the commit to the only remote'
|
||||
);
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl, {
|
||||
OCO_GITPUSH: 'true'
|
||||
})
|
||||
});
|
||||
|
||||
expect(
|
||||
await findByText('Successfully pushed all commits to origin')
|
||||
await oco.findByText('Do you want to run `git push`?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'feat(push): publish the commit to the only remote'
|
||||
);
|
||||
expect(
|
||||
await getRemoteBranchHeadSubject(
|
||||
remoteDir!,
|
||||
await getCurrentBranchName(gitDir)
|
||||
)
|
||||
).toBe('feat(push): publish the commit to the only remote');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('pushes to the selected remote when multiple remotes are configured', async () => {
|
||||
const { gitDir, remoteDir, cleanup } = await prepareEnvironment({
|
||||
remotes: 2
|
||||
});
|
||||
const server = await startMockOpenAiServer(
|
||||
'feat(push): choose a remote explicitly when several exist'
|
||||
);
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl, {
|
||||
OCO_GITPUSH: 'true'
|
||||
})
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText('Choose a remote to push to')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'feat(push): choose a remote explicitly when several exist'
|
||||
);
|
||||
expect(
|
||||
await getRemoteBranchHeadSubject(
|
||||
remoteDir!,
|
||||
await getCurrentBranchName(gitDir)
|
||||
)
|
||||
).toBe('feat(push): choose a remote explicitly when several exist');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it("keeps the commit local when the user chooses 'don't push'", async () => {
|
||||
const { gitDir, remoteDir, otherRemoteDir, cleanup } =
|
||||
await prepareEnvironment({ remotes: 2 });
|
||||
const server = await startMockOpenAiServer(
|
||||
"fix(push): skip the remote step when the user chooses don't push"
|
||||
);
|
||||
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl, {
|
||||
OCO_GITPUSH: 'true'
|
||||
})
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText('Choose a remote to push to')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[ArrowDown][ArrowDown][Enter]');
|
||||
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
"fix(push): skip the remote step when the user chooses don't push"
|
||||
);
|
||||
|
||||
const branchName = await getCurrentBranchName(gitDir);
|
||||
expect(await remoteBranchExists(remoteDir!, branchName)).toBe(false);
|
||||
expect(await remoteBranchExists(otherRemoteDir!, branchName)).toBe(false);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,12 +1,22 @@
|
||||
import { resolve } from 'path'
|
||||
import { render } from 'cli-testing-library'
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import { prepareEnvironment } from './utils';
|
||||
import { prepareEnvironment, runCli, waitForExit } from './utils';
|
||||
|
||||
it('cli flow when there are no changes', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const { findByText } = await render(`OCO_AI_PROVIDER='test' node`, [resolve('./out/cli.cjs')], { cwd: gitDir });
|
||||
expect(await findByText('No changes detected')).toBeInTheConsole();
|
||||
|
||||
try {
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: {
|
||||
OCO_AI_PROVIDER: 'openai',
|
||||
OCO_API_KEY: 'dummy-openai-key',
|
||||
OCO_GITPUSH: 'false'
|
||||
}
|
||||
});
|
||||
|
||||
expect(await oco.findByText('No changes detected')).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(1);
|
||||
} finally {
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1,55 +1,119 @@
|
||||
import { resolve } from 'path'
|
||||
import { render } from 'cli-testing-library'
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import { prepareEnvironment } from './utils';
|
||||
import {
|
||||
assertHeadCommit,
|
||||
getCurrentBranchName,
|
||||
getMockOpenAiEnv,
|
||||
getRemoteBranchHeadSubject,
|
||||
prepareEnvironment,
|
||||
prepareRepo,
|
||||
runCli,
|
||||
startMockOpenAiServer,
|
||||
appendRepoFile,
|
||||
waitForExit
|
||||
} from './utils';
|
||||
|
||||
it('cli flow to generate commit message for 1 new file (staged)', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const { gitDir, remoteDir, cleanup } = await prepareEnvironment();
|
||||
const server = await startMockOpenAiServer(
|
||||
'feat(cli): commit one staged file through the CLI'
|
||||
);
|
||||
|
||||
await render('echo' ,[`'console.log("Hello World");' > index.ts`], { cwd: gitDir });
|
||||
await render('git' ,['add index.ts'], { cwd: gitDir });
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const { queryByText, findByText, userEvent } = await render(`OCO_AI_PROVIDER='test' OCO_GITPUSH='true' node`, [resolve('./out/cli.cjs')], { cwd: gitDir });
|
||||
expect(await queryByText('No files are staged')).not.toBeInTheConsole();
|
||||
expect(await queryByText('Do you want to stage all files and generate commit message?')).not.toBeInTheConsole();
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl, {
|
||||
OCO_GITPUSH: 'true'
|
||||
})
|
||||
});
|
||||
|
||||
expect(await findByText('Generating the commit message')).toBeInTheConsole();
|
||||
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
expect(await oco.queryByText('No files are staged')).not.toBeInTheConsole();
|
||||
expect(
|
||||
await oco.queryByText(
|
||||
'Do you want to stage all files and generate commit message?'
|
||||
)
|
||||
).not.toBeInTheConsole();
|
||||
|
||||
expect(await findByText('Do you want to run `git push`?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await findByText('Successfully pushed all commits to origin')).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText('Do you want to run `git push`?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'feat(cli): commit one staged file through the CLI'
|
||||
);
|
||||
expect(
|
||||
await getRemoteBranchHeadSubject(
|
||||
remoteDir!,
|
||||
await getCurrentBranchName(gitDir)
|
||||
)
|
||||
).toBe('feat(cli): commit one staged file through the CLI');
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('cli flow to generate commit message for 1 changed file (not staged)', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
const server = await startMockOpenAiServer(
|
||||
'fix(cli): stage modified files before committing'
|
||||
);
|
||||
|
||||
await render('echo' ,[`'console.log("Hello World");' > index.ts`], { cwd: gitDir });
|
||||
await render('git' ,['add index.ts'], { cwd: gitDir });
|
||||
await render('git' ,[`commit -m 'add new file'`], { cwd: gitDir });
|
||||
try {
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{
|
||||
stage: true,
|
||||
commitMessage: 'add new file'
|
||||
}
|
||||
);
|
||||
appendRepoFile(gitDir, 'index.ts', 'console.log("Good night World");\n');
|
||||
|
||||
await render('echo' ,[`'console.log("Good night World");' >> index.ts`], { cwd: gitDir });
|
||||
const oco = await runCli(['--yes'], {
|
||||
cwd: gitDir,
|
||||
env: getMockOpenAiEnv(server.baseUrl, {
|
||||
OCO_GITPUSH: 'true'
|
||||
})
|
||||
});
|
||||
|
||||
const { findByText, userEvent } = await render(`OCO_AI_PROVIDER='test' OCO_GITPUSH='true' node`, [resolve('./out/cli.cjs')], { cwd: gitDir });
|
||||
expect(await oco.findByText('No files are staged')).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText(
|
||||
'Do you want to stage all files and generate commit message?'
|
||||
)
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await findByText('No files are staged')).toBeInTheConsole();
|
||||
expect(await findByText('Do you want to stage all files and generate commit message?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
expect(
|
||||
await oco.findByText('Confirm the commit message?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await findByText('Generating the commit message')).toBeInTheConsole();
|
||||
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await findByText('Successfully committed')).toBeInTheConsole();
|
||||
|
||||
expect(await findByText('Do you want to run `git push`?')).toBeInTheConsole();
|
||||
userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await findByText('Successfully pushed all commits to origin')).toBeInTheConsole();
|
||||
expect(
|
||||
await oco.findByText('Do you want to run `git push`?')
|
||||
).toBeInTheConsole();
|
||||
oco.userEvent.keyboard('[Enter]');
|
||||
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(
|
||||
gitDir,
|
||||
'fix(cli): stage modified files before committing'
|
||||
);
|
||||
} finally {
|
||||
await server.cleanup();
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1,146 +1,158 @@
|
||||
import { resolve } from 'path';
|
||||
import { render } from 'cli-testing-library';
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import { prepareEnvironment, wait } from '../utils';
|
||||
import { cpSync } from 'fs';
|
||||
import path from 'path';
|
||||
import { execFile } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import {
|
||||
assertHeadCommit,
|
||||
prepareEnvironment,
|
||||
prepareRepo,
|
||||
runCli,
|
||||
waitForExit
|
||||
} from '../utils';
|
||||
|
||||
function getAbsolutePath(relativePath: string) {
|
||||
// Use process.cwd() which should be the project root during test execution
|
||||
return path.resolve(process.cwd(), 'test/e2e/prompt-module', relativePath);
|
||||
}
|
||||
async function setupCommitlint(dir: string, ver: 9 | 18 | 19) {
|
||||
let packagePath, packageJsonPath, configPath;
|
||||
switch (ver) {
|
||||
case 9:
|
||||
packagePath = getAbsolutePath('./data/commitlint_9/node_modules');
|
||||
packageJsonPath = getAbsolutePath('./data/commitlint_9/package.json');
|
||||
configPath = getAbsolutePath('./data/commitlint_9/commitlint.config.js');
|
||||
break;
|
||||
case 18:
|
||||
packagePath = getAbsolutePath('./data/commitlint_18/node_modules');
|
||||
packageJsonPath = getAbsolutePath('./data/commitlint_18/package.json');
|
||||
configPath = getAbsolutePath('./data/commitlint_18/commitlint.config.js');
|
||||
break;
|
||||
case 19:
|
||||
packagePath = getAbsolutePath('./data/commitlint_19/node_modules');
|
||||
packageJsonPath = getAbsolutePath('./data/commitlint_19/package.json');
|
||||
configPath = getAbsolutePath('./data/commitlint_19/commitlint.config.js');
|
||||
break;
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
const getFixturePath = (version: 9 | 18 | 19, fileName: string) =>
|
||||
path.resolve(
|
||||
process.cwd(),
|
||||
`test/e2e/prompt-module/data/commitlint_${version}/${fileName}`
|
||||
);
|
||||
|
||||
const getPromptModuleEnv = (
|
||||
mockType: 'commit-message' | 'prompt-module-commitlint-config'
|
||||
): NodeJS.ProcessEnv => ({
|
||||
OCO_TEST_MOCK_TYPE: mockType,
|
||||
OCO_PROMPT_MODULE: '@commitlint',
|
||||
OCO_AI_PROVIDER: 'test',
|
||||
OCO_GITPUSH: 'true'
|
||||
});
|
||||
|
||||
async function setupCommitlint(dir: string, version: 9 | 18 | 19) {
|
||||
cpSync(
|
||||
getFixturePath(version, 'node_modules'),
|
||||
path.join(dir, 'node_modules'),
|
||||
{
|
||||
recursive: true
|
||||
}
|
||||
await render('cp', ['-r', packagePath, '.'], { cwd: dir });
|
||||
await render('cp', [packageJsonPath, '.'], { cwd: dir });
|
||||
await render('cp', [configPath, '.'], { cwd: dir });
|
||||
await wait(3000); // Avoid flakiness by waiting
|
||||
);
|
||||
cpSync(
|
||||
getFixturePath(version, 'package.json'),
|
||||
path.join(dir, 'package.json')
|
||||
);
|
||||
cpSync(
|
||||
getFixturePath(version, 'commitlint.config.js'),
|
||||
path.join(dir, 'commitlint.config.js')
|
||||
);
|
||||
}
|
||||
|
||||
async function assertInstalledCommitlintVersion(
|
||||
cwd: string,
|
||||
version: string
|
||||
): Promise<void> {
|
||||
const { stdout = '', stderr = '' } = await execFileAsync(
|
||||
'npm',
|
||||
['list', '@commitlint/load'],
|
||||
{ cwd }
|
||||
);
|
||||
expect(`${stdout}\n${stderr}`).toContain(`@commitlint/load@${version}`);
|
||||
}
|
||||
|
||||
describe('cli flow to run "oco commitlint force"', () => {
|
||||
it('on commitlint@9 using CJS', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
|
||||
try {
|
||||
await setupCommitlint(gitDir, 9);
|
||||
const npmList = await render('npm', ['list', '@commitlint/load'], {
|
||||
cwd: gitDir
|
||||
await assertInstalledCommitlintVersion(gitDir, '9');
|
||||
|
||||
const oco = await runCli(['commitlint', 'force'], {
|
||||
cwd: gitDir,
|
||||
env: getPromptModuleEnv('prompt-module-commitlint-config')
|
||||
});
|
||||
expect(await npmList.findByText('@commitlint/load@9')).toBeInTheConsole();
|
||||
|
||||
const { findByText } = await render(
|
||||
`
|
||||
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
|
||||
OCO_PROMPT_MODULE='@commitlint' \
|
||||
OCO_AI_PROVIDER='test' OCO_GITPUSH='true' \
|
||||
node ${resolve('./out/cli.cjs')} commitlint force \
|
||||
`,
|
||||
[],
|
||||
{ cwd: gitDir }
|
||||
);
|
||||
|
||||
expect(
|
||||
await findByText('opencommit — configure @commitlint')
|
||||
await oco.findByText('opencommit — configure @commitlint')
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await findByText('Read @commitlint configuration')
|
||||
).toBeInTheConsole();
|
||||
|
||||
expect(
|
||||
await findByText('Generating consistency with given @commitlint rules')
|
||||
await oco.findByText('Read @commitlint configuration')
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await findByText('Done - please review contents of')
|
||||
await oco.findByText(
|
||||
'Generating consistency with given @commitlint rules'
|
||||
)
|
||||
).toBeInTheConsole();
|
||||
|
||||
expect(
|
||||
await oco.findByText('Done - please review contents of')
|
||||
).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
} finally {
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('on commitlint@18 using CJS', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
|
||||
try {
|
||||
await setupCommitlint(gitDir, 18);
|
||||
const npmList = await render('npm', ['list', '@commitlint/load'], {
|
||||
cwd: gitDir
|
||||
await assertInstalledCommitlintVersion(gitDir, '18');
|
||||
|
||||
const oco = await runCli(['commitlint', 'force'], {
|
||||
cwd: gitDir,
|
||||
env: getPromptModuleEnv('prompt-module-commitlint-config')
|
||||
});
|
||||
expect(await npmList.findByText('@commitlint/load@18')).toBeInTheConsole();
|
||||
|
||||
const { findByText } = await render(
|
||||
`
|
||||
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
|
||||
OCO_PROMPT_MODULE='@commitlint' \
|
||||
OCO_AI_PROVIDER='test' OCO_GITPUSH='true' \
|
||||
node ${resolve('./out/cli.cjs')} commitlint force \
|
||||
`,
|
||||
[],
|
||||
{ cwd: gitDir }
|
||||
);
|
||||
|
||||
expect(
|
||||
await findByText('opencommit — configure @commitlint')
|
||||
await oco.findByText('opencommit — configure @commitlint')
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await findByText('Read @commitlint configuration')
|
||||
).toBeInTheConsole();
|
||||
|
||||
expect(
|
||||
await findByText('Generating consistency with given @commitlint rules')
|
||||
await oco.findByText('Read @commitlint configuration')
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await findByText('Done - please review contents of')
|
||||
await oco.findByText(
|
||||
'Generating consistency with given @commitlint rules'
|
||||
)
|
||||
).toBeInTheConsole();
|
||||
|
||||
expect(
|
||||
await oco.findByText('Done - please review contents of')
|
||||
).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
} finally {
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
it('on commitlint@19 using ESM', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
|
||||
try {
|
||||
await setupCommitlint(gitDir, 19);
|
||||
const npmList = await render('npm', ['list', '@commitlint/load'], {
|
||||
cwd: gitDir
|
||||
await assertInstalledCommitlintVersion(gitDir, '19');
|
||||
|
||||
const oco = await runCli(['commitlint', 'force'], {
|
||||
cwd: gitDir,
|
||||
env: getPromptModuleEnv('prompt-module-commitlint-config')
|
||||
});
|
||||
expect(await npmList.findByText('@commitlint/load@19')).toBeInTheConsole();
|
||||
|
||||
const { findByText } = await render(
|
||||
`
|
||||
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
|
||||
OCO_PROMPT_MODULE='@commitlint' \
|
||||
OCO_AI_PROVIDER='test' OCO_GITPUSH='true' \
|
||||
node ${resolve('./out/cli.cjs')} commitlint force \
|
||||
`,
|
||||
[],
|
||||
{ cwd: gitDir }
|
||||
);
|
||||
|
||||
expect(
|
||||
await findByText('opencommit — configure @commitlint')
|
||||
await oco.findByText('opencommit — configure @commitlint')
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await findByText('Read @commitlint configuration')
|
||||
).toBeInTheConsole();
|
||||
|
||||
expect(
|
||||
await findByText('Generating consistency with given @commitlint rules')
|
||||
await oco.findByText('Read @commitlint configuration')
|
||||
).toBeInTheConsole();
|
||||
expect(
|
||||
await findByText('Done - please review contents of')
|
||||
await oco.findByText(
|
||||
'Generating consistency with given @commitlint rules'
|
||||
)
|
||||
).toBeInTheConsole();
|
||||
|
||||
expect(
|
||||
await oco.findByText('Done - please review contents of')
|
||||
).toBeInTheConsole();
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
} finally {
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -148,58 +160,39 @@ describe('cli flow to generate commit message using @commitlint prompt-module',
|
||||
it('on commitlint@19 using ESM', async () => {
|
||||
const { gitDir, cleanup } = await prepareEnvironment();
|
||||
|
||||
// Setup commitlint@19
|
||||
try {
|
||||
await setupCommitlint(gitDir, 19);
|
||||
const npmList = await render('npm', ['list', '@commitlint/load'], {
|
||||
cwd: gitDir
|
||||
});
|
||||
expect(await npmList.findByText('@commitlint/load@19')).toBeInTheConsole();
|
||||
await assertInstalledCommitlintVersion(gitDir, '19');
|
||||
|
||||
// Run `oco commitlint force`
|
||||
const commitlintForce = await render(
|
||||
`
|
||||
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
|
||||
OCO_PROMPT_MODULE='@commitlint' \
|
||||
OCO_AI_PROVIDER='test' OCO_GITPUSH='true' \
|
||||
node ${resolve('./out/cli.cjs')} commitlint force \
|
||||
`,
|
||||
[],
|
||||
{ cwd: gitDir }
|
||||
);
|
||||
const commitlintForce = await runCli(['commitlint', 'force'], {
|
||||
cwd: gitDir,
|
||||
env: getPromptModuleEnv('prompt-module-commitlint-config')
|
||||
});
|
||||
expect(
|
||||
await commitlintForce.findByText('Done - please review contents of')
|
||||
).toBeInTheConsole();
|
||||
expect(await waitForExit(commitlintForce)).toBe(0);
|
||||
|
||||
// Run `oco commitlint get`
|
||||
const commitlintGet = await render(
|
||||
`
|
||||
OCO_TEST_MOCK_TYPE='prompt-module-commitlint-config' \
|
||||
OCO_PROMPT_MODULE='@commitlint' \
|
||||
OCO_AI_PROVIDER='test' OCO_GITPUSH='true' \
|
||||
node ${resolve('./out/cli.cjs')} commitlint get \
|
||||
`,
|
||||
[],
|
||||
{ cwd: gitDir }
|
||||
);
|
||||
expect(await commitlintGet.findByText('consistency')).toBeInTheConsole();
|
||||
|
||||
// Run 'oco' using .opencommit-commitlint
|
||||
await render('echo', [`'console.log("Hello World");' > index.ts`], {
|
||||
cwd: gitDir
|
||||
const commitlintGet = await runCli(['commitlint', 'get'], {
|
||||
cwd: gitDir,
|
||||
env: getPromptModuleEnv('prompt-module-commitlint-config')
|
||||
});
|
||||
await render('git', ['add index.ts'], { cwd: gitDir });
|
||||
expect(await commitlintGet.findByText('consistency')).toBeInTheConsole();
|
||||
expect(await waitForExit(commitlintGet)).toBe(0);
|
||||
|
||||
const oco = await render(
|
||||
`
|
||||
OCO_TEST_MOCK_TYPE='commit-message' \
|
||||
OCO_PROMPT_MODULE='@commitlint' \
|
||||
OCO_AI_PROVIDER='test' OCO_GITPUSH='true' \
|
||||
node ${resolve('./out/cli.cjs')} \
|
||||
`,
|
||||
[],
|
||||
{ cwd: gitDir }
|
||||
await prepareRepo(
|
||||
gitDir,
|
||||
{
|
||||
'index.ts': 'console.log("Hello World");\n'
|
||||
},
|
||||
{ stage: true }
|
||||
);
|
||||
|
||||
const oco = await runCli([], {
|
||||
cwd: gitDir,
|
||||
env: getPromptModuleEnv('commit-message')
|
||||
});
|
||||
|
||||
expect(
|
||||
await oco.findByText('Generating the commit message')
|
||||
).toBeInTheConsole();
|
||||
@@ -216,7 +209,10 @@ describe('cli flow to generate commit message using @commitlint prompt-module',
|
||||
expect(
|
||||
await oco.findByText('Successfully pushed all commits to origin')
|
||||
).toBeInTheConsole();
|
||||
|
||||
expect(await waitForExit(oco)).toBe(0);
|
||||
await assertHeadCommit(gitDir, 'fix(testAi.ts): test commit message');
|
||||
} finally {
|
||||
await cleanup();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -eu
|
||||
|
||||
current_dir=$(pwd)
|
||||
setup_dir="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
|
||||
30
test/e2e/smoke.test.ts
Normal file
30
test/e2e/smoke.test.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import packageJson from '../../package.json';
|
||||
import 'cli-testing-library/extend-expect';
|
||||
import { runCli, waitForExit } from './utils';
|
||||
|
||||
it('prints help without entering the interactive flow', async () => {
|
||||
const help = await runCli(['--help'], {
|
||||
cwd: process.cwd()
|
||||
});
|
||||
|
||||
expect(await help.findByText('opencommit')).toBeInTheConsole();
|
||||
expect(await help.findByText('--context')).toBeInTheConsole();
|
||||
expect(await help.findByText('--yes')).toBeInTheConsole();
|
||||
expect(
|
||||
await help.queryByText('Select your AI provider:')
|
||||
).not.toBeInTheConsole();
|
||||
expect(await help.queryByText('Enter your API key:')).not.toBeInTheConsole();
|
||||
expect(await waitForExit(help)).toBe(0);
|
||||
});
|
||||
|
||||
it('prints the current version without booting the CLI runtime', async () => {
|
||||
const version = await runCli(['--version'], {
|
||||
cwd: process.cwd()
|
||||
});
|
||||
|
||||
expect(await version.findByText(packageJson.version)).toBeInTheConsole();
|
||||
expect(
|
||||
await version.queryByText('Generating the commit message')
|
||||
).not.toBeInTheConsole();
|
||||
expect(await waitForExit(version)).toBe(0);
|
||||
});
|
||||
@@ -1,37 +1,558 @@
|
||||
import path from 'path'
|
||||
import { mkdtemp, rm } from 'fs'
|
||||
import { promisify } from 'util';
|
||||
import path from 'path';
|
||||
import {
|
||||
appendFileSync,
|
||||
existsSync,
|
||||
mkdirSync,
|
||||
mkdtemp,
|
||||
rm,
|
||||
writeFileSync
|
||||
} from 'fs';
|
||||
import http from 'http';
|
||||
import { tmpdir } from 'os';
|
||||
import { exec } from 'child_process';
|
||||
import { execFile } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import type { AddressInfo } from 'net';
|
||||
import { render } from 'cli-testing-library';
|
||||
import type { RenderResult } from 'cli-testing-library';
|
||||
|
||||
const fsMakeTempDir = promisify(mkdtemp);
|
||||
const fsExec = promisify(exec);
|
||||
const fsExecFile = promisify(execFile);
|
||||
const fsRemove = promisify(rm);
|
||||
|
||||
/**
|
||||
* Prepare the environment for the test
|
||||
* Create a temporary git repository in the temp directory
|
||||
*/
|
||||
export const prepareEnvironment = async (): Promise<{
|
||||
const CLI_PATH = path.resolve(process.cwd(), 'out/cli.cjs');
|
||||
const DEFAULT_TEST_ENV = {
|
||||
OCO_TEST_SKIP_VERSION_CHECK: 'true'
|
||||
};
|
||||
const COMPLETED_MIGRATIONS = [
|
||||
'00_use_single_api_key_and_url',
|
||||
'01_remove_obsolete_config_keys_from_global_file',
|
||||
'02_set_missing_default_values'
|
||||
];
|
||||
|
||||
type ProcessOptions = {
|
||||
cwd: string;
|
||||
env?: NodeJS.ProcessEnv;
|
||||
};
|
||||
|
||||
type PrepareEnvironmentOptions = {
|
||||
remotes?: 0 | 1 | 2;
|
||||
};
|
||||
|
||||
export const getCliPath = () => CLI_PATH;
|
||||
|
||||
export const runProcess = async (
|
||||
command: string,
|
||||
args: string[] = [],
|
||||
{ cwd, env = {} }: ProcessOptions
|
||||
): Promise<RenderResult> => {
|
||||
return render(command, args, {
|
||||
cwd,
|
||||
spawnOpts: {
|
||||
env: {
|
||||
...process.env,
|
||||
...DEFAULT_TEST_ENV,
|
||||
...env
|
||||
}
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
export const runCli = async (
|
||||
args: string[] = [],
|
||||
options: ProcessOptions
|
||||
): Promise<RenderResult> => {
|
||||
return runProcess(process.execPath, [getCliPath(), ...args], options);
|
||||
};
|
||||
|
||||
export const runGit = async (
|
||||
args: string[],
|
||||
cwd: string
|
||||
): Promise<{ stdout: string; stderr: string }> => {
|
||||
const { stdout = '', stderr = '' } = await fsExecFile('git', args, { cwd });
|
||||
return { stdout, stderr };
|
||||
};
|
||||
|
||||
export const configureGitUser = async (gitDir: string): Promise<void> => {
|
||||
await runGit(['config', 'user.email', 'test@example.com'], gitDir);
|
||||
await runGit(['config', 'user.name', 'Test User'], gitDir);
|
||||
};
|
||||
|
||||
export const prepareEnvironment = async ({
|
||||
remotes = 1
|
||||
}: PrepareEnvironmentOptions = {}): Promise<{
|
||||
tempDir: string;
|
||||
gitDir: string;
|
||||
remoteDir?: string;
|
||||
otherRemoteDir?: string;
|
||||
cleanup: () => Promise<void>;
|
||||
}> => {
|
||||
const tempDir = await prepareTempDir();
|
||||
// Create a remote git repository int the temp directory. This is necessary to execute the `git push` command
|
||||
await fsExec('git init --bare remote.git', { cwd: tempDir });
|
||||
await fsExec('git clone remote.git test', { cwd: tempDir });
|
||||
const gitDir = path.resolve(tempDir, 'test');
|
||||
let remoteDir: string | undefined;
|
||||
let otherRemoteDir: string | undefined;
|
||||
|
||||
if (remotes === 0) {
|
||||
await fsExecFile('git', ['init', 'test'], { cwd: tempDir });
|
||||
} else {
|
||||
await fsExecFile('git', ['init', '--bare', 'remote.git'], {
|
||||
cwd: tempDir
|
||||
});
|
||||
remoteDir = path.resolve(tempDir, 'remote.git');
|
||||
|
||||
if (remotes === 2) {
|
||||
await fsExecFile('git', ['init', '--bare', 'other.git'], {
|
||||
cwd: tempDir
|
||||
});
|
||||
otherRemoteDir = path.resolve(tempDir, 'other.git');
|
||||
}
|
||||
|
||||
await fsExecFile('git', ['clone', 'remote.git', 'test'], { cwd: tempDir });
|
||||
|
||||
if (remotes === 2) {
|
||||
await runGit(['remote', 'add', 'other', '../other.git'], gitDir);
|
||||
}
|
||||
}
|
||||
|
||||
await configureGitUser(gitDir);
|
||||
|
||||
const cleanup = async () => {
|
||||
return fsRemove(tempDir, { recursive: true });
|
||||
if (existsSync(tempDir)) {
|
||||
await fsRemove(tempDir, { force: true, recursive: true });
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
tempDir,
|
||||
gitDir,
|
||||
cleanup,
|
||||
remoteDir,
|
||||
otherRemoteDir,
|
||||
cleanup
|
||||
};
|
||||
};
|
||||
|
||||
export const prepareTempDir = async (): Promise<string> => {
|
||||
return fsMakeTempDir(path.join(tmpdir(), 'opencommit-test-'));
|
||||
};
|
||||
|
||||
export const prepareRepo = async (
|
||||
gitDir: string,
|
||||
files: Record<string, string>,
|
||||
options: {
|
||||
stage?: string[] | true;
|
||||
commitMessage?: string;
|
||||
} = {}
|
||||
): Promise<void> => {
|
||||
for (const [relativePath, content] of Object.entries(files)) {
|
||||
writeRepoFile(gitDir, relativePath, content);
|
||||
}
|
||||
}
|
||||
|
||||
export const prepareTempDir = async(): Promise<string> => {
|
||||
return await fsMakeTempDir(path.join(tmpdir(), 'opencommit-test-'));
|
||||
}
|
||||
const stageFiles =
|
||||
options.stage === true
|
||||
? Object.keys(files)
|
||||
: Array.isArray(options.stage)
|
||||
? options.stage
|
||||
: options.commitMessage
|
||||
? Object.keys(files)
|
||||
: [];
|
||||
|
||||
export const wait = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
|
||||
if (stageFiles.length > 0) {
|
||||
await runGit(['add', ...stageFiles], gitDir);
|
||||
}
|
||||
|
||||
if (options.commitMessage) {
|
||||
await runGit(['commit', '-m', options.commitMessage], gitDir);
|
||||
}
|
||||
};
|
||||
|
||||
export const writeRepoFile = (
|
||||
gitDir: string,
|
||||
relativePath: string,
|
||||
content: string
|
||||
): void => {
|
||||
const filePath = path.resolve(gitDir, relativePath);
|
||||
mkdirSync(path.dirname(filePath), { recursive: true });
|
||||
writeFileSync(filePath, content);
|
||||
};
|
||||
|
||||
export const appendRepoFile = (
|
||||
gitDir: string,
|
||||
relativePath: string,
|
||||
content: string
|
||||
): void => {
|
||||
const filePath = path.resolve(gitDir, relativePath);
|
||||
mkdirSync(path.dirname(filePath), { recursive: true });
|
||||
appendFileSync(filePath, content);
|
||||
};
|
||||
|
||||
export const writeGlobalConfig = (homeDir: string, lines: string[]): string => {
|
||||
const configPath = path.resolve(homeDir, '.opencommit');
|
||||
writeFileSync(configPath, lines.join('\n'));
|
||||
return configPath;
|
||||
};
|
||||
|
||||
export const seedMigrations = (
|
||||
homeDir: string,
|
||||
completedMigrations: string[] = COMPLETED_MIGRATIONS
|
||||
): string => {
|
||||
const migrationsPath = path.resolve(homeDir, '.opencommit_migrations');
|
||||
writeFileSync(migrationsPath, JSON.stringify(completedMigrations));
|
||||
return migrationsPath;
|
||||
};
|
||||
|
||||
export const seedModelCache = async (
|
||||
homeDir: string,
|
||||
models: Record<string, string[]>
|
||||
): Promise<void> => {
|
||||
const modelCachePath = path.resolve(homeDir, '.opencommit-models.json');
|
||||
writeFileSync(
|
||||
modelCachePath,
|
||||
JSON.stringify(
|
||||
{
|
||||
timestamp: Date.now(),
|
||||
models
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
);
|
||||
};
|
||||
|
||||
export const getMockOpenAiEnv = (
|
||||
baseUrl: string,
|
||||
overrides: NodeJS.ProcessEnv = {}
|
||||
): NodeJS.ProcessEnv => ({
|
||||
OCO_AI_PROVIDER: 'openai',
|
||||
OCO_API_KEY: 'test-openai-key',
|
||||
OCO_MODEL: 'gpt-4o-mini',
|
||||
OCO_API_URL: baseUrl,
|
||||
OCO_GITPUSH: 'false',
|
||||
...overrides
|
||||
});
|
||||
|
||||
export const getMockGeminiEnv = (
|
||||
baseUrl: string,
|
||||
overrides: NodeJS.ProcessEnv = {}
|
||||
): NodeJS.ProcessEnv => ({
|
||||
OCO_AI_PROVIDER: 'gemini',
|
||||
OCO_API_KEY: 'test-gemini-key',
|
||||
OCO_MODEL: 'gemini-1.5-flash',
|
||||
OCO_API_URL: baseUrl,
|
||||
OCO_GITPUSH: 'false',
|
||||
...overrides
|
||||
});
|
||||
|
||||
export const wait = (ms: number) =>
|
||||
new Promise((resolve) => setTimeout(resolve, ms));
|
||||
|
||||
export const waitForExit = async (
|
||||
instance: RenderResult,
|
||||
timeoutMs: number = 20_000
|
||||
): Promise<number> => {
|
||||
const startedAt = Date.now();
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
const exit = instance.hasExit();
|
||||
if (exit) {
|
||||
return exit.exitCode;
|
||||
}
|
||||
await wait(25);
|
||||
}
|
||||
|
||||
throw new Error('Process did not exit within the expected timeout');
|
||||
};
|
||||
|
||||
export const getHeadCommitSubject = async (gitDir: string): Promise<string> => {
|
||||
const { stdout } = await runGit(['log', '-1', '--pretty=%s'], gitDir);
|
||||
return stdout.trim();
|
||||
};
|
||||
|
||||
export const getHeadCommitMessage = async (gitDir: string): Promise<string> => {
|
||||
const { stdout } = await runGit(['log', '-1', '--pretty=%B'], gitDir);
|
||||
return stdout.trim();
|
||||
};
|
||||
|
||||
export const getHeadCommitFiles = async (gitDir: string): Promise<string[]> => {
|
||||
const { stdout } = await runGit(
|
||||
['diff-tree', '--root', '--no-commit-id', '--name-only', '-r', 'HEAD'],
|
||||
gitDir
|
||||
);
|
||||
|
||||
return stdout
|
||||
.split('\n')
|
||||
.map((file) => file.trim())
|
||||
.filter(Boolean)
|
||||
.sort();
|
||||
};
|
||||
|
||||
export const getShortGitStatus = async (gitDir: string): Promise<string> => {
|
||||
const { stdout } = await runGit(['status', '--short'], gitDir);
|
||||
return stdout.trim();
|
||||
};
|
||||
|
||||
export const getCurrentBranchName = async (gitDir: string): Promise<string> => {
|
||||
const { stdout } = await runGit(['branch', '--show-current'], gitDir);
|
||||
return stdout.trim();
|
||||
};
|
||||
|
||||
export const getRemoteBranchHeadSubject = async (
|
||||
remoteGitDir: string,
|
||||
branchName: string
|
||||
): Promise<string> => {
|
||||
const { stdout = '' } = await fsExecFile(
|
||||
'git',
|
||||
[
|
||||
'--git-dir',
|
||||
remoteGitDir,
|
||||
'log',
|
||||
'-1',
|
||||
'--pretty=%s',
|
||||
`refs/heads/${branchName}`
|
||||
],
|
||||
{ cwd: process.cwd() }
|
||||
);
|
||||
|
||||
return stdout.trim();
|
||||
};
|
||||
|
||||
export const remoteBranchExists = async (
|
||||
remoteGitDir: string,
|
||||
branchName: string
|
||||
): Promise<boolean> => {
|
||||
try {
|
||||
await fsExecFile(
|
||||
'git',
|
||||
[
|
||||
'--git-dir',
|
||||
remoteGitDir,
|
||||
'rev-parse',
|
||||
'--verify',
|
||||
'--quiet',
|
||||
`refs/heads/${branchName}`
|
||||
],
|
||||
{ cwd: process.cwd() }
|
||||
);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
export const assertHeadCommit = async (
|
||||
gitDir: string,
|
||||
expectedSubject: string
|
||||
): Promise<void> => {
|
||||
expect(await getHeadCommitSubject(gitDir)).toBe(expectedSubject);
|
||||
};
|
||||
|
||||
export const assertGitStatus = async (
|
||||
gitDir: string,
|
||||
expected: string | RegExp
|
||||
): Promise<void> => {
|
||||
const status = await getShortGitStatus(gitDir);
|
||||
if (typeof expected === 'string') {
|
||||
expect(status).toContain(expected);
|
||||
return;
|
||||
}
|
||||
|
||||
expect(status).toMatch(expected);
|
||||
};
|
||||
|
||||
export const startMockOpenAiServer = async (
|
||||
response:
|
||||
| string
|
||||
| ((request: {
|
||||
authorization?: string;
|
||||
body: Record<string, any> | undefined;
|
||||
requestIndex: number;
|
||||
}) => {
|
||||
status?: number;
|
||||
body: Record<string, any>;
|
||||
headers?: Record<string, string>;
|
||||
})
|
||||
): Promise<{
|
||||
authHeaders: string[];
|
||||
requestBodies: Array<Record<string, any>>;
|
||||
baseUrl: string;
|
||||
cleanup: () => Promise<void>;
|
||||
}> => {
|
||||
const authHeaders: string[] = [];
|
||||
const requestBodies: Array<Record<string, any>> = [];
|
||||
|
||||
const server = http.createServer((req, res) => {
|
||||
const authorization = req.headers.authorization;
|
||||
if (authorization) {
|
||||
authHeaders.push(
|
||||
Array.isArray(authorization) ? authorization[0] : authorization
|
||||
);
|
||||
}
|
||||
|
||||
const chunks: Buffer[] = [];
|
||||
req.on('data', (chunk) => {
|
||||
chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
|
||||
});
|
||||
req.on('end', () => {
|
||||
const rawBody = Buffer.concat(chunks).toString('utf8');
|
||||
let parsedBody: Record<string, any> | undefined;
|
||||
if (rawBody) {
|
||||
try {
|
||||
parsedBody = JSON.parse(rawBody);
|
||||
requestBodies.push(parsedBody);
|
||||
} catch {
|
||||
requestBodies.push({ rawBody });
|
||||
}
|
||||
}
|
||||
|
||||
if (req.method === 'POST' && req.url?.includes('/chat/completions')) {
|
||||
const payload =
|
||||
typeof response === 'string'
|
||||
? {
|
||||
status: 200,
|
||||
body: {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: response
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
: response({
|
||||
authorization: Array.isArray(authorization)
|
||||
? authorization[0]
|
||||
: authorization,
|
||||
body: parsedBody,
|
||||
requestIndex: requestBodies.length - 1
|
||||
});
|
||||
|
||||
res.writeHead(payload.status ?? 200, {
|
||||
'Content-Type': 'application/json',
|
||||
...payload.headers
|
||||
});
|
||||
res.end(JSON.stringify(payload.body));
|
||||
return;
|
||||
}
|
||||
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'not found' }));
|
||||
});
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
server.listen(0, '127.0.0.1', () => resolve());
|
||||
});
|
||||
|
||||
const { port } = server.address() as AddressInfo;
|
||||
|
||||
return {
|
||||
authHeaders,
|
||||
requestBodies,
|
||||
baseUrl: `http://127.0.0.1:${port}/v1`,
|
||||
cleanup: () =>
|
||||
new Promise<void>((resolve, reject) => {
|
||||
server.close((error) => {
|
||||
if (error) {
|
||||
reject(error);
|
||||
return;
|
||||
}
|
||||
resolve();
|
||||
});
|
||||
})
|
||||
};
|
||||
};
|
||||
|
||||
export const startMockGeminiServer = async (
|
||||
response:
|
||||
| Record<string, any>
|
||||
| ((request: {
|
||||
apiKey?: string;
|
||||
body: Record<string, any> | undefined;
|
||||
requestIndex: number;
|
||||
}) => {
|
||||
status?: number;
|
||||
body: Record<string, any>;
|
||||
headers?: Record<string, string>;
|
||||
})
|
||||
): Promise<{
|
||||
apiKeys: string[];
|
||||
requestBodies: Array<Record<string, any>>;
|
||||
baseUrl: string;
|
||||
cleanup: () => Promise<void>;
|
||||
}> => {
|
||||
const apiKeys: string[] = [];
|
||||
const requestBodies: Array<Record<string, any>> = [];
|
||||
|
||||
const server = http.createServer((req, res) => {
|
||||
const apiKeyHeader = req.headers['x-goog-api-key'];
|
||||
if (apiKeyHeader) {
|
||||
apiKeys.push(
|
||||
Array.isArray(apiKeyHeader) ? apiKeyHeader[0] : apiKeyHeader
|
||||
);
|
||||
}
|
||||
|
||||
const chunks: Buffer[] = [];
|
||||
req.on('data', (chunk) => {
|
||||
chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
|
||||
});
|
||||
req.on('end', () => {
|
||||
const rawBody = Buffer.concat(chunks).toString('utf8');
|
||||
let parsedBody: Record<string, any> | undefined;
|
||||
if (rawBody) {
|
||||
try {
|
||||
parsedBody = JSON.parse(rawBody);
|
||||
requestBodies.push(parsedBody);
|
||||
} catch {
|
||||
requestBodies.push({ rawBody });
|
||||
}
|
||||
}
|
||||
|
||||
if (req.method === 'POST' && req.url?.includes(':generateContent')) {
|
||||
const payload =
|
||||
typeof response === 'function'
|
||||
? response({
|
||||
apiKey: Array.isArray(apiKeyHeader)
|
||||
? apiKeyHeader[0]
|
||||
: apiKeyHeader,
|
||||
body: parsedBody,
|
||||
requestIndex: requestBodies.length - 1
|
||||
})
|
||||
: {
|
||||
status: 200,
|
||||
body: response
|
||||
};
|
||||
|
||||
res.writeHead(payload.status ?? 200, {
|
||||
'Content-Type': 'application/json',
|
||||
...payload.headers
|
||||
});
|
||||
res.end(JSON.stringify(payload.body));
|
||||
return;
|
||||
}
|
||||
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'not found' }));
|
||||
});
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
server.listen(0, '127.0.0.1', () => resolve());
|
||||
});
|
||||
|
||||
const { port } = server.address() as AddressInfo;
|
||||
|
||||
return {
|
||||
apiKeys,
|
||||
requestBodies,
|
||||
baseUrl: `http://127.0.0.1:${port}`,
|
||||
cleanup: () =>
|
||||
new Promise<void>((resolve, reject) => {
|
||||
server.close((error) => {
|
||||
if (error) {
|
||||
reject(error);
|
||||
return;
|
||||
}
|
||||
resolve();
|
||||
});
|
||||
})
|
||||
};
|
||||
};
|
||||
|
||||
@@ -6,6 +6,7 @@ import { configure } from 'cli-testing-library';
|
||||
global.jest = jest;
|
||||
|
||||
/**
|
||||
* Adjusted the wait time for waitFor/findByText to 2000ms, because the default 1000ms makes the test results flaky
|
||||
* CLI rendering gets noticeably slower under coverage and on CI, so keep a
|
||||
* slightly roomier timeout than the library default.
|
||||
*/
|
||||
configure({ asyncUtilTimeout: 2000 });
|
||||
configure({ asyncUtilTimeout: 10000 });
|
||||
|
||||
@@ -129,7 +129,8 @@ describe('config', () => {
|
||||
});
|
||||
|
||||
envConfigFile = await generateConfig('.env', {
|
||||
OCO_API_CUSTOM_HEADERS: '{"Authorization": "Bearer token123", "X-Custom-Header": "test-value"}'
|
||||
OCO_API_CUSTOM_HEADERS:
|
||||
'{"Authorization": "Bearer token123", "X-Custom-Header": "test-value"}'
|
||||
});
|
||||
|
||||
const config = getConfig({
|
||||
@@ -138,7 +139,10 @@ describe('config', () => {
|
||||
});
|
||||
|
||||
expect(config).not.toEqual(null);
|
||||
expect(config.OCO_API_CUSTOM_HEADERS).toEqual({"Authorization": "Bearer token123", "X-Custom-Header": "test-value"});
|
||||
expect(config.OCO_API_CUSTOM_HEADERS).toEqual({
|
||||
Authorization: 'Bearer token123',
|
||||
'X-Custom-Header': 'test-value'
|
||||
});
|
||||
|
||||
// No need to parse JSON again since it's already an object
|
||||
const parsedHeaders = config.OCO_API_CUSTOM_HEADERS;
|
||||
@@ -199,6 +203,48 @@ describe('config', () => {
|
||||
expect(config).not.toEqual(null);
|
||||
expect(config.OCO_API_KEY).toEqual(undefined);
|
||||
});
|
||||
|
||||
it('should not create a global config file when only reading defaults', async () => {
|
||||
globalConfigFile = await generateConfig('.opencommit', {});
|
||||
rmSync(globalConfigFile.filePath);
|
||||
|
||||
const config = getConfig({
|
||||
globalPath: globalConfigFile.filePath
|
||||
});
|
||||
|
||||
expect(config.OCO_MODEL).toEqual(DEFAULT_CONFIG.OCO_MODEL);
|
||||
expect(existsSync(globalConfigFile.filePath)).toBe(false);
|
||||
});
|
||||
|
||||
it('should not materialize ambient proxy env vars into OCO_PROXY', async () => {
|
||||
process.env.HTTPS_PROXY = 'http://127.0.0.1:7890';
|
||||
|
||||
globalConfigFile = await generateConfig('.opencommit', {});
|
||||
envConfigFile = await generateConfig('.env', {});
|
||||
|
||||
const config = getConfig({
|
||||
globalPath: globalConfigFile.filePath,
|
||||
envPath: envConfigFile.filePath
|
||||
});
|
||||
|
||||
expect(config.OCO_PROXY).toEqual(undefined);
|
||||
});
|
||||
|
||||
it('should parse OCO_PROXY=null from local .env as explicit disable', async () => {
|
||||
globalConfigFile = await generateConfig('.opencommit', {
|
||||
OCO_PROXY: 'http://global-proxy:8080'
|
||||
});
|
||||
envConfigFile = await generateConfig('.env', {
|
||||
OCO_PROXY: 'null'
|
||||
});
|
||||
|
||||
const config = getConfig({
|
||||
globalPath: globalConfigFile.filePath,
|
||||
envPath: envConfigFile.filePath
|
||||
});
|
||||
|
||||
expect(config.OCO_PROXY).toEqual(null);
|
||||
});
|
||||
});
|
||||
|
||||
describe('setConfig', () => {
|
||||
@@ -325,5 +371,20 @@ describe('config', () => {
|
||||
const fileContent2 = readFileSync(globalConfigFile.filePath, 'utf8');
|
||||
expect(fileContent2).toContain('OCO_MODEL=gpt-4');
|
||||
});
|
||||
|
||||
it('should persist OCO_PROXY=null as an explicit disable', async () => {
|
||||
await setConfig(
|
||||
[[CONFIG_KEYS.OCO_PROXY, null]],
|
||||
globalConfigFile.filePath
|
||||
);
|
||||
|
||||
const config = getConfig({
|
||||
globalPath: globalConfigFile.filePath
|
||||
});
|
||||
const fileContent = readFileSync(globalConfigFile.filePath, 'utf8');
|
||||
|
||||
expect(config.OCO_PROXY).toEqual(null);
|
||||
expect(fileContent).toContain('OCO_PROXY=null');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
29
test/unit/errors.test.ts
Normal file
29
test/unit/errors.test.ts
Normal file
@@ -0,0 +1,29 @@
|
||||
import {
|
||||
formatUserFriendlyError,
|
||||
ServiceUnavailableError
|
||||
} from '../../src/utils/errors';
|
||||
|
||||
describe('formatUserFriendlyError', () => {
|
||||
it('should keep provider wording when no custom API URL is configured', () => {
|
||||
const formatted = formatUserFriendlyError(
|
||||
new ServiceUnavailableError('openai'),
|
||||
'openai'
|
||||
);
|
||||
|
||||
expect(formatted.message).toEqual(
|
||||
'The openai service is temporarily unavailable.'
|
||||
);
|
||||
});
|
||||
|
||||
it('should use configured endpoint wording when a custom API URL is provided', () => {
|
||||
const formatted = formatUserFriendlyError(
|
||||
new ServiceUnavailableError('openai'),
|
||||
'openai',
|
||||
{ baseURL: 'http://127.0.0.1:1234/v1' }
|
||||
);
|
||||
|
||||
expect(formatted.message).toContain('configured API endpoint');
|
||||
expect(formatted.message).toContain('127.0.0.1:1234');
|
||||
expect(formatted.message).not.toContain('openai service');
|
||||
});
|
||||
});
|
||||
@@ -1,96 +1,133 @@
|
||||
import { FinishReason, Outcome } from '@google/generative-ai';
|
||||
import { OpenAI } from 'openai';
|
||||
import { GeminiEngine } from '../../src/engine/gemini';
|
||||
|
||||
import { GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai';
|
||||
import {
|
||||
ConfigType,
|
||||
getConfig,
|
||||
OCO_AI_PROVIDER_ENUM
|
||||
} from '../../src/commands/config';
|
||||
import { OpenAI } from 'openai';
|
||||
|
||||
describe('Gemini', () => {
|
||||
let gemini: GeminiEngine;
|
||||
let mockConfig: ConfigType;
|
||||
let mockGoogleGenerativeAi: GoogleGenerativeAI;
|
||||
let mockGenerativeModel: GenerativeModel;
|
||||
let mockExit: jest.SpyInstance<never, [code?: number | undefined], any>;
|
||||
|
||||
const noop: (...args: any[]) => any = (...args: any[]) => {};
|
||||
|
||||
const mockGemini = () => {
|
||||
mockConfig = getConfig() as ConfigType;
|
||||
|
||||
gemini = new GeminiEngine({
|
||||
apiKey: mockConfig.OCO_API_KEY,
|
||||
model: mockConfig.OCO_MODEL
|
||||
});
|
||||
};
|
||||
|
||||
const oldEnv = process.env;
|
||||
|
||||
beforeEach(() => {
|
||||
jest.resetModules();
|
||||
process.env = { ...oldEnv };
|
||||
|
||||
jest.mock('@google/generative-ai');
|
||||
jest.mock('../src/commands/config');
|
||||
|
||||
jest.mock('@clack/prompts', () => ({
|
||||
intro: jest.fn(),
|
||||
outro: jest.fn()
|
||||
}));
|
||||
|
||||
mockExit = jest.spyOn(process, 'exit').mockImplementation();
|
||||
|
||||
mockConfig = getConfig() as ConfigType;
|
||||
|
||||
mockConfig.OCO_AI_PROVIDER = OCO_AI_PROVIDER_ENUM.GEMINI;
|
||||
mockConfig.OCO_API_KEY = 'mock-api-key';
|
||||
mockConfig.OCO_MODEL = 'gemini-1.5-flash';
|
||||
|
||||
mockGoogleGenerativeAi = new GoogleGenerativeAI(mockConfig.OCO_API_KEY);
|
||||
mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({
|
||||
model: mockConfig.OCO_MODEL
|
||||
});
|
||||
describe('GeminiEngine', () => {
|
||||
it('maps OpenAI-style chat messages into Gemini request payloads and ignores non-text parts', async () => {
|
||||
const engine = new GeminiEngine({
|
||||
apiKey: 'mock-api-key',
|
||||
model: 'gemini-1.5-flash',
|
||||
baseURL: 'http://127.0.0.1:8080/v1',
|
||||
maxTokensOutput: 256,
|
||||
maxTokensInput: 4096
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
gemini = undefined as any;
|
||||
const generateContent = jest.fn().mockResolvedValue({
|
||||
response: {
|
||||
candidates: [
|
||||
{
|
||||
index: 0,
|
||||
content: {
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
text: 'feat(gemini): translate the diff<think>hidden</think>'
|
||||
},
|
||||
{
|
||||
executableCode: {
|
||||
language: 'python',
|
||||
code: 'print("hidden")'
|
||||
}
|
||||
},
|
||||
{
|
||||
codeExecutionResult: {
|
||||
outcome: Outcome.OUTCOME_OK,
|
||||
output: 'hidden'
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
finishReason: FinishReason.STOP
|
||||
}
|
||||
]
|
||||
}
|
||||
});
|
||||
const getGenerativeModel = jest.fn().mockReturnValue({
|
||||
generateContent
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
mockExit.mockRestore();
|
||||
process.env = oldEnv;
|
||||
});
|
||||
|
||||
it.skip('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
|
||||
process.env.OCO_GEMINI_API_KEY = undefined;
|
||||
process.env.OCO_AI_PROVIDER = 'gemini';
|
||||
|
||||
mockGemini();
|
||||
|
||||
expect(mockExit).toHaveBeenCalledWith(1);
|
||||
});
|
||||
|
||||
it('should generate commit message', async () => {
|
||||
const mockGenerateContent = jest
|
||||
.fn()
|
||||
.mockResolvedValue({ response: { text: () => 'generated content' } });
|
||||
mockGenerativeModel.generateContent = mockGenerateContent;
|
||||
|
||||
mockGemini();
|
||||
engine.client = {
|
||||
getGenerativeModel
|
||||
} as any;
|
||||
|
||||
const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> =
|
||||
[
|
||||
{ role: 'system', content: 'system message' },
|
||||
{ role: 'assistant', content: 'assistant message' }
|
||||
{ role: 'assistant', content: 'assistant guidance' },
|
||||
{ role: 'user', content: 'diff --git a/file b/file' }
|
||||
];
|
||||
|
||||
jest
|
||||
.spyOn(gemini, 'generateCommitMessage')
|
||||
.mockImplementation(async () => 'generated content');
|
||||
const result = await gemini.generateCommitMessage(messages);
|
||||
const result = await engine.generateCommitMessage(messages);
|
||||
|
||||
expect(result).toEqual('generated content');
|
||||
expect(result).toEqual('feat(gemini): translate the diff');
|
||||
expect(getGenerativeModel).toHaveBeenCalledWith(
|
||||
{
|
||||
model: 'gemini-1.5-flash',
|
||||
systemInstruction: 'system message'
|
||||
},
|
||||
{
|
||||
baseUrl: 'http://127.0.0.1:8080/v1'
|
||||
}
|
||||
);
|
||||
expect(generateContent).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
contents: [
|
||||
{
|
||||
parts: [{ text: 'assistant guidance' }],
|
||||
role: 'model'
|
||||
},
|
||||
{
|
||||
parts: [{ text: 'diff --git a/file b/file' }],
|
||||
role: 'user'
|
||||
}
|
||||
],
|
||||
generationConfig: expect.objectContaining({
|
||||
maxOutputTokens: 256,
|
||||
temperature: 0,
|
||||
topP: 0.1
|
||||
})
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('fails when Gemini reports a blocked finish reason', async () => {
|
||||
const engine = new GeminiEngine({
|
||||
apiKey: 'mock-api-key',
|
||||
model: 'gemini-1.5-flash',
|
||||
baseURL: 'http://127.0.0.1:8080/v1',
|
||||
maxTokensOutput: 256,
|
||||
maxTokensInput: 4096
|
||||
});
|
||||
|
||||
const generateContent = jest.fn().mockResolvedValue({
|
||||
response: {
|
||||
candidates: [
|
||||
{
|
||||
index: 0,
|
||||
content: {
|
||||
role: 'model',
|
||||
parts: [{ text: 'feat(gemini): should not pass' }]
|
||||
},
|
||||
finishReason: FinishReason.LANGUAGE,
|
||||
finishMessage: 'Unsupported language'
|
||||
}
|
||||
]
|
||||
}
|
||||
});
|
||||
|
||||
engine.client = {
|
||||
getGenerativeModel: jest.fn().mockReturnValue({
|
||||
generateContent
|
||||
})
|
||||
} as any;
|
||||
|
||||
await expect(
|
||||
engine.generateCommitMessage([
|
||||
{ role: 'system', content: 'system message' },
|
||||
{ role: 'user', content: 'diff --git a/file b/file' }
|
||||
])
|
||||
).rejects.toThrow(
|
||||
'Gemini response was blocked due to LANGUAGE: Unsupported language'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
64
test/unit/ollama.test.ts
Normal file
64
test/unit/ollama.test.ts
Normal file
@@ -0,0 +1,64 @@
|
||||
import { OllamaEngine } from '../../src/engine/ollama';
|
||||
|
||||
describe('OllamaEngine', () => {
|
||||
it('sends think=false when configured', async () => {
|
||||
const engine = new OllamaEngine({
|
||||
apiKey: 'ollama',
|
||||
model: 'qwen3.5:2b',
|
||||
maxTokensOutput: 500,
|
||||
maxTokensInput: 4096,
|
||||
ollamaThink: false
|
||||
});
|
||||
|
||||
const post = jest.fn().mockResolvedValue({
|
||||
data: {
|
||||
message: {
|
||||
content: 'feat: add support for ollama think config'
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
engine.client = { post } as any;
|
||||
|
||||
await engine.generateCommitMessage([
|
||||
{ role: 'user', content: 'diff --git a/file b/file' }
|
||||
]);
|
||||
|
||||
expect(post).toHaveBeenCalledWith(
|
||||
'http://localhost:11434/api/chat',
|
||||
expect.objectContaining({
|
||||
think: false
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('omits think when not configured', async () => {
|
||||
const engine = new OllamaEngine({
|
||||
apiKey: 'ollama',
|
||||
model: 'qwen3.5:2b',
|
||||
maxTokensOutput: 500,
|
||||
maxTokensInput: 4096
|
||||
});
|
||||
|
||||
const post = jest.fn().mockResolvedValue({
|
||||
data: {
|
||||
message: {
|
||||
content: 'feat: add support for ollama think config'
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
engine.client = { post } as any;
|
||||
|
||||
await engine.generateCommitMessage([
|
||||
{ role: 'user', content: 'diff --git a/file b/file' }
|
||||
]);
|
||||
|
||||
expect(post).toHaveBeenCalledWith(
|
||||
'http://localhost:11434/api/chat',
|
||||
expect.not.objectContaining({
|
||||
think: expect.anything()
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
71
test/unit/openAi.test.ts
Normal file
71
test/unit/openAi.test.ts
Normal file
@@ -0,0 +1,71 @@
|
||||
import { OpenAI } from 'openai';
|
||||
import { OpenAiEngine } from '../../src/engine/openAi';
|
||||
|
||||
describe('OpenAiEngine', () => {
|
||||
const baseConfig = {
|
||||
apiKey: 'test-openai-key',
|
||||
maxTokensInput: 4096,
|
||||
maxTokensOutput: 256
|
||||
};
|
||||
|
||||
const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> = [
|
||||
{ role: 'system', content: 'system message' },
|
||||
{ role: 'user', content: 'diff --git a/file b/file' }
|
||||
];
|
||||
|
||||
it('uses max_completion_tokens for reasoning models', async () => {
|
||||
const engine = new OpenAiEngine({
|
||||
...baseConfig,
|
||||
model: 'o3-mini'
|
||||
});
|
||||
|
||||
const create = jest
|
||||
.spyOn(engine.client.chat.completions, 'create')
|
||||
.mockResolvedValue({
|
||||
choices: [{ message: { content: 'feat(openai): reasoning path' } }]
|
||||
} as any);
|
||||
|
||||
await engine.generateCommitMessage(messages);
|
||||
|
||||
expect(create).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
model: 'o3-mini',
|
||||
max_completion_tokens: 256
|
||||
})
|
||||
);
|
||||
expect(create).toHaveBeenCalledWith(
|
||||
expect.not.objectContaining({
|
||||
max_tokens: expect.anything()
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('uses max_tokens and sampling params for non-reasoning models', async () => {
|
||||
const engine = new OpenAiEngine({
|
||||
...baseConfig,
|
||||
model: 'gpt-4o-mini'
|
||||
});
|
||||
|
||||
const create = jest
|
||||
.spyOn(engine.client.chat.completions, 'create')
|
||||
.mockResolvedValue({
|
||||
choices: [{ message: { content: 'feat(openai): standard path' } }]
|
||||
} as any);
|
||||
|
||||
await engine.generateCommitMessage(messages);
|
||||
|
||||
expect(create).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
model: 'gpt-4o-mini',
|
||||
max_tokens: 256,
|
||||
temperature: 0,
|
||||
top_p: 0.1
|
||||
})
|
||||
);
|
||||
expect(create).toHaveBeenCalledWith(
|
||||
expect.not.objectContaining({
|
||||
max_completion_tokens: expect.anything()
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
126
test/unit/proxy.test.ts
Normal file
126
test/unit/proxy.test.ts
Normal file
@@ -0,0 +1,126 @@
|
||||
import axios from 'axios';
|
||||
import { getGlobalDispatcher } from 'undici';
|
||||
import { AnthropicEngine } from '../../src/engine/anthropic';
|
||||
import { OpenAiEngine } from '../../src/engine/openAi';
|
||||
import { resolveProxy, setupProxy } from '../../src/utils/proxy';
|
||||
|
||||
describe('proxy utilities', () => {
|
||||
const originalEnv = { ...process.env };
|
||||
const originalAxiosProxy = axios.defaults.proxy;
|
||||
const originalAxiosHttpAgent = axios.defaults.httpAgent;
|
||||
const originalAxiosHttpsAgent = axios.defaults.httpsAgent;
|
||||
|
||||
function resetEnv(env: NodeJS.ProcessEnv) {
|
||||
Object.keys(process.env).forEach((key) => {
|
||||
if (!(key in env)) {
|
||||
delete process.env[key];
|
||||
} else {
|
||||
process.env[key] = env[key];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
resetEnv(originalEnv);
|
||||
setupProxy(undefined);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
resetEnv(originalEnv);
|
||||
setupProxy(undefined);
|
||||
axios.defaults.proxy = originalAxiosProxy;
|
||||
axios.defaults.httpAgent = originalAxiosHttpAgent;
|
||||
axios.defaults.httpsAgent = originalAxiosHttpsAgent;
|
||||
});
|
||||
|
||||
it('should prefer an explicit proxy URL over ambient proxy env vars', () => {
|
||||
process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
|
||||
|
||||
expect(resolveProxy('http://explicit-proxy:8080')).toEqual(
|
||||
'http://explicit-proxy:8080'
|
||||
);
|
||||
});
|
||||
|
||||
it('should return null when proxy is explicitly disabled', () => {
|
||||
process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
|
||||
|
||||
expect(resolveProxy(null)).toEqual(null);
|
||||
});
|
||||
|
||||
it('should fall back to ambient proxy env vars when proxy is unset', () => {
|
||||
process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
|
||||
|
||||
expect(resolveProxy(undefined)).toEqual('http://ambient-proxy:8080');
|
||||
});
|
||||
|
||||
it('should disable proxy usage when setupProxy receives null', () => {
|
||||
process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
|
||||
|
||||
setupProxy(null);
|
||||
|
||||
expect(getGlobalDispatcher().constructor.name).toEqual('Agent');
|
||||
expect(axios.defaults.proxy).toEqual(false);
|
||||
expect(axios.defaults.httpAgent).toBeUndefined();
|
||||
expect(axios.defaults.httpsAgent).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should install proxy agents when setupProxy receives a proxy URL', () => {
|
||||
setupProxy('http://127.0.0.1:7890');
|
||||
|
||||
expect(getGlobalDispatcher().constructor.name).toEqual('ProxyAgent');
|
||||
expect(axios.defaults.proxy).toEqual(false);
|
||||
expect(axios.defaults.httpAgent).toBeDefined();
|
||||
expect(axios.defaults.httpsAgent).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('engine proxy handling', () => {
|
||||
const originalEnv = { ...process.env };
|
||||
const baseConfig = {
|
||||
apiKey: 'test-key',
|
||||
model: 'gpt-4o-mini',
|
||||
maxTokensInput: 4096,
|
||||
maxTokensOutput: 256
|
||||
};
|
||||
|
||||
function resetEnv(env: NodeJS.ProcessEnv) {
|
||||
Object.keys(process.env).forEach((key) => {
|
||||
if (!(key in env)) {
|
||||
delete process.env[key];
|
||||
} else {
|
||||
process.env[key] = env[key];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
resetEnv(originalEnv);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
resetEnv(originalEnv);
|
||||
});
|
||||
|
||||
it('should not let OpenAI engine re-read proxy env vars when proxy is unset', () => {
|
||||
process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
|
||||
|
||||
const engine = new OpenAiEngine({
|
||||
...baseConfig,
|
||||
proxy: undefined
|
||||
});
|
||||
|
||||
expect(engine.client.httpAgent).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should not let Anthropic engine re-read proxy env vars when proxy is unset', () => {
|
||||
process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
|
||||
|
||||
const engine = new AnthropicEngine({
|
||||
...baseConfig,
|
||||
model: 'claude-sonnet-4-20250514',
|
||||
proxy: undefined
|
||||
});
|
||||
|
||||
expect(engine.client.httpAgent).toBeUndefined();
|
||||
});
|
||||
});
|
||||
@@ -8,7 +8,8 @@ describe('removeContentTags', () => {
|
||||
});
|
||||
|
||||
it('should handle multiple tag occurrences', () => {
|
||||
const content = '<think>hidden</think> visible <think>also hidden</think> text';
|
||||
const content =
|
||||
'<think>hidden</think> visible <think>also hidden</think> text';
|
||||
const result = removeContentTags(content, 'think');
|
||||
expect(result).toBe('visible text');
|
||||
});
|
||||
@@ -26,7 +27,8 @@ describe('removeContentTags', () => {
|
||||
});
|
||||
|
||||
it('should work with different tag names', () => {
|
||||
const content = 'This is <custom>something to hide</custom> visible content';
|
||||
const content =
|
||||
'This is <custom>something to hide</custom> visible content';
|
||||
const result = removeContentTags(content, 'custom');
|
||||
expect(result).toBe('This is visible content');
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user