Compare commits

...

19 Commits

Author SHA1 Message Date
di-sukharev
a7fd0d8237 3.2.17 2026-04-03 19:06:14 +03:00
di-sukharev
6cb67e5150 build 2026-04-03 19:05:33 +03:00
di-sukharev
62129503b3 3.2.16 2026-04-03 19:05:31 +03:00
di-sukharev
f81e836f34 Merge branch 'master' of github.com:di-sukharev/opencommit 2026-04-03 19:05:26 +03:00
di-sukharev
c3d1fb379f build 2026-04-03 19:04:25 +03:00
GPT8
a9c9bcfd5a Merge pull request #547 from keith666666/fix/ollama-url-resolution
fix(engine): fix broken URL resolution in Ollama and MLX engines
2026-04-03 19:01:02 +03:00
keith666666
0ee82f7430 fix(engine): fix broken URL resolution in Ollama and MLX engines
Both OllamaEngine and MLXEngine had two bugs in URL construction:

1. `axios.create({url: ...})` was used instead of `baseURL`, but `url`
   in axios config sets a default request URL - not a base prefix. This
   caused the URL to be ignored when `.post()` was called with a path.

2. `this.client.getUri(this.config)` was used to resolve the POST URL,
   but passing the engine config (which contains non-axios properties
   like `apiKey`, `model`, etc.) produced malformed URLs. When
   `apiKey` is null (the default for Ollama), the URL resolved to
   `http://localhost:11434/null`, returning HTTP 405.

Fix: construct the full endpoint URL once in the constructor and pass
it directly to `axios.post()`, matching how FlowiseEngine already works.

Co-Authored-By: Claude <noreply@anthropic.com>
2026-04-02 10:52:32 +08:00
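A condensed sketch of the fix described above (constant and property names follow the Ollama engine diff further down; the class and method names here are made up for illustration):

```ts
import axios, { AxiosInstance } from 'axios';

const DEFAULT_OLLAMA_URL = 'http://localhost:11434';
const OLLAMA_CHAT_PATH = '/api/chat';

// Broken shape: axios.create({ url }) sets a default request URL, not a
// base prefix, so it was ignored by .post(path); and getUri(this.config)
// serialized non-axios keys, yielding http://localhost:11434/null when
// apiKey was null (HTTP 405).
// Fixed shape: resolve the endpoint once, pass it straight to post().
class OllamaEngineSketch {
  private client: AxiosInstance;
  private chatUrl: string;

  constructor(baseURL?: string) {
    this.chatUrl = `${baseURL || DEFAULT_OLLAMA_URL}${OLLAMA_CHAT_PATH}`;
    this.client = axios.create({
      headers: { 'Content-Type': 'application/json' }
    });
  }

  async chat(params: object) {
    const response = await this.client.post(this.chatUrl, params);
    return response.data;
  }
}
```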
GPT8
9923dab532 Merge pull request #544 from majiayu000/fix/issue-529-max-completion-tokens
fix: use max_completion_tokens for reasoning models (o1/o3/o4/gpt-5)
2026-03-31 11:28:34 +03:00
majiayu000
f74ba2dfc6 fix: resolve CI failures — revert gemini test mock path and fix prettier formatting
Signed-off-by: majiayu000 <1835304752@qq.com>
2026-03-30 00:55:06 +08:00
majiayu000
53414438d1 fix: resolve unit test failures
- Add chalk to jest transformIgnorePatterns so ESM chalk import works
- Fix wrong mock path in gemini.test.ts (../src -> ../../src)

Signed-off-by: majiayu000 <1835304752@qq.com>
2026-03-30 00:54:48 +08:00
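For context: Jest skips transforming anything under node_modules unless a transformIgnorePatterns entry exempts it, which the negative lookahead does. An illustrative check of the updated pattern:

```ts
// A path that matches the pattern is NOT transformed; the lookahead makes
// chalk (ESM-only) fall through to the transformer and get compiled to CJS.
const ignore = /node_modules\/(?!(cli-testing-library|@clack|cleye|chalk)\/.*)/;

console.log(ignore.test('node_modules/chalk/source/index.js')); // false -> transformed
console.log(ignore.test('node_modules/lodash/lodash.js'));      // true  -> skipped
```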
majiayu000
6982e76cf5 fix: improve type safety for max_completion_tokens params
Remove Record<string, unknown> type annotation to let TypeScript infer
the params object type, preserving type checking on all properties.
Cast to ChatCompletionCreateParamsNonStreaming at the create() call site
to accommodate the SDK's missing max_completion_tokens type. Add unit
test for reasoning model detection regex.

Signed-off-by: majiayu000 <1835304752@qq.com>
2026-03-30 00:54:48 +08:00
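The type-safety point, as a standalone sketch (the objects are made-up stand-ins for the engine's request params):

```ts
// An explicit Record<string, unknown> accepts any key, so typos compile:
const loose: Record<string, unknown> = {
  model: 'gpt-5',
  max_completion_tokenz: 1024 // typo goes unnoticed
};

// Letting TypeScript infer the literal type keeps property-level checking;
// the cast to the SDK's request type happens only at the create() call site:
const params = {
  model: 'gpt-5',
  messages: [{ role: 'user', content: 'hi' }],
  max_completion_tokens: 1024
};
// client.chat.completions.create(
//   params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
// );
```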
majiayu000
dc7f7f6552 fix: use max_completion_tokens for reasoning models in OpenAI engine
Newer OpenAI models (o1, o3, o4, gpt-5 series) reject the max_tokens
parameter and require max_completion_tokens instead. These reasoning
models also do not support temperature and top_p parameters.

Conditionally set the correct token parameter and omit unsupported
sampling parameters based on the model name.

Fixes #529

Signed-off-by: majiayu000 <1835304752@qq.com>
2026-03-30 00:54:44 +08:00
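In sketch form, the parameter selection this commit describes (the regex and field names match the engine diff shown further down):

```ts
// o1/o3/o4/gpt-5 style models reject max_tokens, temperature, and top_p;
// they take max_completion_tokens instead.
const isReasoningModel = (model: string) => /^(o[1-9]|gpt-5)/.test(model);

function buildParams(model: string, maxTokensOutput: number) {
  return {
    model,
    ...(isReasoningModel(model)
      ? { max_completion_tokens: maxTokensOutput }
      : { temperature: 0, top_p: 0.1, max_tokens: maxTokensOutput })
  };
}

buildParams('o3-mini', 500); // { model, max_completion_tokens: 500 }
buildParams('gpt-4o', 500);  // { model, temperature: 0, top_p: 0.1, max_tokens: 500 }
```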
GPT8
db8a22b0cb Merge pull request #546 from freedomsky11/feat/proxy-support
feat: add universal proxy support and fix Gemini model resolution (#536)
2026-03-29 19:20:36 +03:00
sky
e27007b6fe feat(proxy): add universal proxy support and fix Gemini model resolution (#536)
Integrated undici ProxyAgent for native fetch and HttpsProxyAgent for axios/openai/anthropic. Upgraded @google/generative-ai to fix #536. Added OCO_PROXY config.

Co-authored-by: uni <uni@hanwei.ink>
2026-03-29 14:54:45 +00:00
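The shape of that integration, condensed from the src/utils/proxy.ts and engine diffs below (the function name here is a stand-in; error handling omitted):

```ts
import { setGlobalDispatcher, ProxyAgent } from 'undici';
import axios from 'axios';
import { HttpsProxyAgent } from 'https-proxy-agent';

// One proxy URL has to reach two HTTP stacks: undici's global dispatcher
// covers native fetch (used by the Gemini SDK), while https-proxy-agent
// covers axios and is also passed as httpAgent to the OpenAI/Anthropic SDKs.
function applyProxy(proxyUrl?: string) {
  const proxy = proxyUrl || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
  if (!proxy) return;
  setGlobalDispatcher(new ProxyAgent(proxy));
  axios.defaults.httpsAgent = new HttpsProxyAgent(proxy);
  axios.defaults.proxy = false; // disable axios' built-in proxy handling
}
```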
GPT8
f51393e37a Merge pull request #545 from majiayu000/fix/issue-493-rebase-slow-hook
fix: skip migrations and version check when called as git hook
2026-03-27 15:27:05 +03:00
majiayu000
83f9193749 fix: stabilize e2e flow in clean CI env
Signed-off-by: majiayu000 <majiayu000@users.noreply.github.com>
2026-03-27 17:19:31 +08:00
majiayu000
bc608e97bd fix: skip migrations and version check when called as git hook
Move isHookCalled() check before runMigrations() and
checkIsLatestVersion() so that during git rebase, each pick commit
exits immediately without expensive I/O and network calls.

Also adds missing await on prepareCommitMessageHook() to properly
handle async errors.

Closes #493

Signed-off-by: majiayu000 <1835304752@qq.com>
2026-03-21 10:59:24 +08:00
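Condensed, the control-flow change (imports as in the CLI entry diff below; the wrapper function is a stand-in for cleye's command callback):

```ts
import { isHookCalled } from './commands/githook.js';
import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
import { runMigrations } from './migrations/_run.js';
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';

async function main() {
  // Hook invocations bail out before any expensive I/O or network calls,
  // so a rebase that fires the hook once per picked commit stays fast.
  if (await isHookCalled()) {
    await prepareCommitMessageHook(); // awaited, so async errors surface
    return;
  }
  await runMigrations();
  await checkIsLatestVersion();
  // ...setup wizard, API-key prompt, commit flow
}
```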
GPT8
40182f26b3 Merge pull request #542 from ymrdf/master
Fix: Allow OCO_API_URL to override DeepSeek engine baseURL
2026-03-14 20:02:34 +03:00
gaozhenqian
62d56a5278 Fix: Allow OCO_API_URL to override DeepSeek engine baseURL
- Move hardcoded baseURL before ...config spread in constructor
- This allows user config to override the default DeepSeek API URL
- Fixes issue #539 where OCO_API_URL was ignored by DeepSeek engine
2026-03-12 14:22:45 +08:00
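The mechanics are plain object-spread precedence: later properties overwrite earlier ones, so the hardcoded default must come first for user config to win. A self-contained illustration (the gateway URL is a made-up example):

```ts
const config = { apiKey: 'sk-example', baseURL: 'https://my-gateway.example.com/v1' };

// Before: the hardcoded value came last and clobbered OCO_API_URL.
const before = { ...config, baseURL: 'https://api.deepseek.com/v1' };
// before.baseURL === 'https://api.deepseek.com/v1'

// After: the default comes first, so user config overrides it.
const after = { baseURL: 'https://api.deepseek.com/v1', ...config };
// after.baseURL === 'https://my-gateway.example.com/v1'
```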
29 changed files with 18592 additions and 90736 deletions

.gitignore vendored (1 line changed)

@@ -1,4 +1,5 @@
node_modules/
out/
coverage/
temp/
build/


@@ -237,6 +237,16 @@ oco config set OCO_AI_PROVIDER=flowise OCO_API_KEY=<your_flowise_api_key> OCO_AP
oco config set OCO_AI_PROVIDER=ollama OCO_API_KEY=<your_ollama_api_key> OCO_API_URL=<your_ollama_endpoint>
```
### Use with Proxy
If you are behind a proxy, you can set it in the config:
```sh
oco config set OCO_PROXY=http://127.0.0.1:7890
```
Or it will automatically use `HTTPS_PROXY` or `HTTP_PROXY` environment variables.
### Locale configuration
To globally specify the language used to generate commit messages:


@@ -19,7 +19,7 @@ const config: Config = {
'<rootDir>/test/e2e/prompt-module/data/'
],
transformIgnorePatterns: [
'node_modules/(?!(cli-testing-library|@clack|cleye)/.*)'
'node_modules/(?!(cli-testing-library|@clack|cleye|chalk)/.*)'
],
transform: {
'^.+\\.(ts|tsx|js|jsx|mjs)$': [

out/cli.cjs (19723 lines changed)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary file not shown.

package-lock.json generated (48 lines changed)

@@ -1,12 +1,12 @@
{
"name": "opencommit",
"version": "3.2.15",
"version": "3.2.17",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "opencommit",
"version": "3.2.15",
"version": "3.2.17",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.10.0",
@@ -16,7 +16,7 @@
"@azure/openai": "^1.0.0-beta.12",
"@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4",
"@google/generative-ai": "^0.24.1",
"@mistralai/mistralai": "^1.3.5",
"@octokit/webhooks-schemas": "^6.11.0",
"@octokit/webhooks-types": "^6.11.0",
@@ -25,6 +25,7 @@
"cleye": "^1.3.2",
"crypto": "^1.0.1",
"execa": "^7.0.0",
"https-proxy-agent": "^8.0.0",
"ignore": "^5.2.4",
"ini": "^3.0.1",
"inquirer": "^9.1.4",
@@ -185,6 +186,19 @@
"node": ">=18.0.0"
}
},
"node_modules/@azure/core-rest-pipeline/node_modules/https-proxy-agent": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
"integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
"license": "MIT",
"dependencies": {
"agent-base": "^7.1.2",
"debug": "4"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/@azure/core-sse": {
"version": "2.1.2",
"license": "MIT",
@@ -1395,7 +1409,9 @@
}
},
"node_modules/@google/generative-ai": {
"version": "0.11.4",
"version": "0.24.1",
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.24.1.tgz",
"integrity": "sha512-MqO+MLfM6kjxcKoy0p1wRzG3b4ZZXtPI+z2IE26UogS2Cm/XHO+7gGRBh6gcJsOiIVoH93UwKvW4HdgiOZCy9Q==",
"license": "Apache-2.0",
"engines": {
"node": ">=18.0.0"
@@ -2921,11 +2937,10 @@
}
},
"node_modules/agent-base": {
"version": "7.1.1",
"version": "7.1.4",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
"integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
"license": "MIT",
"dependencies": {
"debug": "^4.3.4"
},
"engines": {
"node": ">= 14"
}
@@ -4770,16 +4785,27 @@
}
},
"node_modules/https-proxy-agent": {
"version": "7.0.4",
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-8.0.0.tgz",
"integrity": "sha512-YYeW+iCnAS3xhvj2dvVoWgsbca3RfQy/IlaNHHOtDmU0jMqPI9euIq3Y9BJETdxk16h9NHHCKqp/KB9nIMStCQ==",
"license": "MIT",
"dependencies": {
"agent-base": "^7.0.2",
"debug": "4"
"agent-base": "8.0.0",
"debug": "^4.3.4"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/https-proxy-agent/node_modules/agent-base": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-8.0.0.tgz",
"integrity": "sha512-QT8i0hCz6C/KQ+KTAbSNwCHDGdmUJl2tp2ZpNlGSWCfhUNVbYG2WLE3MdZGBAgXPV4GAvjGMxo+C1hroyxmZEg==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/human-signals": {
"version": "4.3.1",
"license": "Apache-2.0",


@@ -1,6 +1,6 @@
{
"name": "opencommit",
"version": "3.2.15",
"version": "3.2.17",
"description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
"keywords": [
"git",
@@ -89,7 +89,7 @@
"@azure/openai": "^1.0.0-beta.12",
"@clack/prompts": "^0.6.1",
"@dqbd/tiktoken": "^1.0.2",
"@google/generative-ai": "^0.11.4",
"@google/generative-ai": "^0.24.1",
"@mistralai/mistralai": "^1.3.5",
"@octokit/webhooks-schemas": "^6.11.0",
"@octokit/webhooks-types": "^6.11.0",
@@ -98,6 +98,7 @@
"cleye": "^1.3.2",
"crypto": "^1.0.1",
"execa": "^7.0.0",
"https-proxy-agent": "^8.0.0",
"ignore": "^5.2.4",
"ini": "^3.0.1",
"inquirer": "^9.1.4",


@@ -5,9 +5,10 @@ import { cli } from 'cleye';
import packageJSON from '../package.json';
import { commit } from './commands/commit';
import { commitlintConfigCommand } from './commands/commitlint';
import { configCommand } from './commands/config';
import { configCommand, getConfig } from './commands/config';
import { hookCommand, isHookCalled } from './commands/githook.js';
import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
import { setupProxy } from './utils/proxy';
import {
setupCommand,
isFirstRun,
@@ -18,13 +19,22 @@ import { modelsCommand } from './commands/models';
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
import { runMigrations } from './migrations/_run.js';
const config = getConfig();
setupProxy(config.OCO_PROXY);
const extraArgs = process.argv.slice(2);
cli(
{
version: packageJSON.version,
name: 'opencommit',
commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
commands: [
configCommand,
hookCommand,
commitlintConfigCommand,
setupCommand,
modelsCommand
],
flags: {
fgm: {
type: Boolean,
@@ -48,28 +58,29 @@ cli(
help: { description: packageJSON.description }
},
async ({ flags }) => {
if (await isHookCalled()) {
await prepareCommitMessageHook();
return;
}
await runMigrations();
await checkIsLatestVersion();
if (await isHookCalled()) {
prepareCommitMessageHook();
} else {
// Check for first run and trigger setup wizard
if (isFirstRun()) {
const setupComplete = await runSetup();
if (!setupComplete) {
process.exit(1);
}
}
// Check for missing API key and prompt if needed
const hasApiKey = await promptForMissingApiKey();
if (!hasApiKey) {
// Check for first run and trigger setup wizard
if (isFirstRun()) {
const setupComplete = await runSetup();
if (!setupComplete) {
process.exit(1);
}
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
}
// Check for missing API key and prompt if needed
const hasApiKey = await promptForMissingApiKey();
if (!hasApiKey) {
process.exit(1);
}
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
},
extraArgs
);


@@ -11,10 +11,7 @@ import {
import chalk from 'chalk';
import { execa } from 'execa';
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
import {
formatUserFriendlyError,
printFormattedError
} from '../utils/errors';
import { formatUserFriendlyError, printFormattedError } from '../utils/errors';
import {
assertGitRepo,
getChangedFiles,


@@ -25,6 +25,7 @@ export enum CONFIG_KEYS {
OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT',
OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE',
OCO_API_URL = 'OCO_API_URL',
OCO_PROXY = 'OCO_PROXY',
OCO_API_CUSTOM_HEADERS = 'OCO_API_CUSTOM_HEADERS',
OCO_OMIT_SCOPE = 'OCO_OMIT_SCOPE',
OCO_GITPUSH = 'OCO_GITPUSH', // todo: deprecate
@@ -727,6 +728,15 @@ export const configValidators = {
return value;
},
[CONFIG_KEYS.OCO_PROXY](value: any) {
validateConfig(
CONFIG_KEYS.OCO_PROXY,
typeof value === 'string',
`${value} is not a valid URL. It should start with 'http://' or 'https://'.`
);
return value;
},
[CONFIG_KEYS.OCO_MODEL](value: any, config: any = {}) {
validateConfig(
CONFIG_KEYS.OCO_MODEL,
@@ -849,7 +859,8 @@ export enum OCO_AI_PROVIDER_ENUM {
export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/api-keys',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/keys',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]:
'https://console.anthropic.com/settings/keys',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/apikey',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/keys',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/api-keys/',
@@ -872,13 +883,14 @@ export const RECOMMENDED_MODELS: Record<string, string> = {
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'deepseek-chat',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'openai/gpt-4o-mini',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini'
}
};
export type ConfigType = {
[CONFIG_KEYS.OCO_API_KEY]?: string;
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT]: number;
[CONFIG_KEYS.OCO_API_URL]?: string;
[CONFIG_KEYS.OCO_PROXY]?: string;
[CONFIG_KEYS.OCO_API_CUSTOM_HEADERS]?: string;
[CONFIG_KEYS.OCO_DESCRIPTION]: boolean;
[CONFIG_KEYS.OCO_EMOJI]: boolean;
@@ -963,6 +975,10 @@ const getEnvConfig = (envPath: string) => {
return {
OCO_MODEL: process.env.OCO_MODEL,
OCO_API_URL: process.env.OCO_API_URL,
OCO_PROXY:
process.env.OCO_PROXY ||
process.env.HTTPS_PROXY ||
process.env.HTTP_PROXY,
OCO_API_KEY: process.env.OCO_API_KEY,
OCO_API_CUSTOM_HEADERS: process.env.OCO_API_CUSTOM_HEADERS,
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM,
@@ -1188,6 +1204,11 @@ function getConfigKeyDetails(key) {
'Custom API URL - may be used to set proxy path to OpenAI API',
values: ["URL string (must start with 'http://' or 'https://')"]
};
case CONFIG_KEYS.OCO_PROXY:
return {
description: 'HTTP/HTTPS Proxy URL',
values: ["URL string (must start with 'http://' or 'https://')"]
};
case CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
return {
description: 'Message template placeholder',


@@ -2,11 +2,7 @@ import { intro, outro, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
MODEL_LIST,
OCO_AI_PROVIDER_ENUM,
getConfig
} from './config';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM, getConfig } from './config';
import {
fetchModelsForProvider,
clearModelCache,
@@ -31,7 +27,10 @@ function formatCacheAge(timestamp: number | null): string {
return 'just now';
}
async function listModels(provider: string, useCache: boolean = true): Promise<void> {
async function listModels(
provider: string,
useCache: boolean = true
): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const currentModel = config.OCO_MODEL;
@@ -52,7 +51,9 @@ async function listModels(provider: string, useCache: boolean = true): Promise<v
models = MODEL_LIST[providerKey] || [];
}
console.log(`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`);
console.log(
`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`
);
if (models.length === 0) {
console.log(chalk.dim(' No models found'));
@@ -79,14 +80,23 @@ async function refreshModels(provider: string): Promise<void> {
clearModelCache();
try {
const models = await fetchModelsForProvider(provider, apiKey, undefined, true);
const models = await fetchModelsForProvider(
provider,
apiKey,
undefined,
true
);
loadingSpinner.stop(`${chalk.green('+')} Fetched ${models.length} models`);
// List the models
await listModels(provider, true);
} catch (error) {
loadingSpinner.stop(chalk.red('Failed to fetch models'));
console.error(chalk.red(`Error: ${error instanceof Error ? error.message : 'Unknown error'}`));
console.error(
chalk.red(
`Error: ${error instanceof Error ? error.message : 'Unknown error'}`
)
);
}
}
@@ -112,7 +122,8 @@ export const modelsCommand = command(
},
async ({ flags }) => {
const config = getConfig();
const provider = flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
const provider =
flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
intro(chalk.bgCyan(' OpenCommit Models '));
@@ -120,7 +131,9 @@ export const modelsCommand = command(
const cacheInfo = getCacheInfo();
if (cacheInfo.timestamp) {
console.log(
chalk.dim(` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`)
chalk.dim(
` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`
)
);
if (cacheInfo.providers.length > 0) {
console.log(
@@ -137,8 +150,6 @@ export const modelsCommand = command(
await listModels(provider);
}
outro(
`Run ${chalk.cyan('oco models --refresh')} to update the model list`
);
outro(`Run ${chalk.cyan('oco models --refresh')} to update the model list`);
}
);


@@ -52,6 +52,12 @@ const OTHER_PROVIDERS = [
];
const NO_API_KEY_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OLLAMA,
OCO_AI_PROVIDER_ENUM.MLX,
OCO_AI_PROVIDER_ENUM.TEST
];
const MODEL_REQUIRED_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OLLAMA,
OCO_AI_PROVIDER_ENUM.MLX
];
@@ -90,7 +96,8 @@ async function selectProvider(): Promise<string | symbol> {
}
async function getApiKey(provider: string): Promise<string | symbol> {
const url = PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];
const url =
PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];
let message = `Enter your ${provider} API key:`;
if (url) {
@@ -127,7 +134,8 @@ async function selectModel(
provider: string,
apiKey?: string
): Promise<string | symbol> {
const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
const providerDisplayName =
PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
const loadingSpinner = spinner();
loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
@@ -178,7 +186,8 @@ async function selectModel(
}
// Get recommended model for this provider
const recommended = RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
const recommended =
RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
// Build options with recommended first
const options: Array<{ value: string; label: string }> = [];
@@ -191,9 +200,7 @@ async function selectModel(
}
// Add other models (first 10, excluding recommended)
const otherModels = models
.filter((m) => m !== recommended)
.slice(0, 10);
const otherModels = models.filter((m) => m !== recommended).slice(0, 10);
otherModels.forEach((model) => {
options.push({ value: model, label: model });
@@ -409,27 +416,31 @@ export async function runSetup(): Promise<boolean> {
setGlobalConfig(newConfig as any);
outro(
`${chalk.green('✔')} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan('oco')} to generate commit messages!`
`${chalk.green(
'✔'
)} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan(
'oco'
)} to generate commit messages!`
);
return true;
}
export function isFirstRun(): boolean {
if (!getIsGlobalConfigFileExist()) {
return true;
}
const config = getConfig();
// Check if API key is missing for providers that need it
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
if (MODEL_REQUIRED_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
// For Ollama/MLX, check if model is set
return !config.OCO_MODEL;
}
if (provider === OCO_AI_PROVIDER_ENUM.TEST) {
return false;
}
// For other providers, check if API key is set
return !config.OCO_API_KEY;
}
@@ -447,9 +458,7 @@ export async function promptForMissingApiKey(): Promise<boolean> {
}
console.log(
chalk.yellow(
`\nAPI key missing for ${provider}. Let's set it up.\n`
)
chalk.yellow(`\nAPI key missing for ${provider}. Let's set it up.\n`)
);
const apiKey = await getApiKey(provider);


@@ -11,6 +11,7 @@ export interface AiEngineConfig {
maxTokensOutput: number;
maxTokensInput: number;
baseURL?: string;
proxy?: string;
customHeaders?: Record<string, string>;
}


@@ -1,4 +1,5 @@
import AnthropicClient from '@anthropic-ai/sdk';
import { HttpsProxyAgent } from 'https-proxy-agent';
import {
MessageCreateParamsNonStreaming,
MessageParam
@@ -18,7 +19,15 @@ export class AnthropicEngine implements AiEngine {
constructor(config) {
this.config = config;
this.client = new AnthropicClient({ apiKey: this.config.apiKey });
const clientOptions: any = { apiKey: this.config.apiKey };
const proxy =
config.proxy || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
if (proxy) {
clientOptions.httpAgent = new HttpsProxyAgent(proxy);
}
this.client = new AnthropicClient(clientOptions);
}
public generateCommitMessage = async (


@@ -10,9 +10,10 @@ export interface DeepseekConfig extends OpenAiConfig {}
export class DeepseekEngine extends OpenAiEngine {
constructor(config: DeepseekConfig) {
// Call OpenAIEngine constructor with forced Deepseek baseURL
// Put baseURL first so user config can override it
super({
...config,
baseURL: 'https://api.deepseek.com/v1'
baseURL: 'https://api.deepseek.com/v1',
...config
});
}


@@ -29,10 +29,15 @@ export class GeminiEngine implements AiEngine {
.map((m) => m.content)
.join('\n');
const gemini = this.client.getGenerativeModel({
model: this.config.model,
systemInstruction
});
const gemini = this.client.getGenerativeModel(
{
model: this.config.model,
systemInstruction
},
{
baseUrl: this.config.baseURL
}
);
const contents = messages
.filter((m) => m.role !== 'system')


@@ -1,4 +1,5 @@
import { OpenAI } from 'openai';
import { HttpsProxyAgent } from 'https-proxy-agent';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';


@@ -6,16 +6,21 @@ import { AiEngine, AiEngineConfig } from './Engine';
interface MLXConfig extends AiEngineConfig {}
const DEFAULT_MLX_URL = 'http://localhost:8080';
const MLX_CHAT_PATH = '/v1/chat/completions';
export class MLXEngine implements AiEngine {
config: MLXConfig;
client: AxiosInstance;
private chatUrl: string;
constructor(config) {
this.config = config;
const baseUrl = config.baseURL || DEFAULT_MLX_URL;
this.chatUrl = `${baseUrl}${MLX_CHAT_PATH}`;
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:8080/v1/chat/completions',
headers: { 'Content-Type': 'application/json' }
});
}
@@ -31,10 +36,7 @@ export class MLXEngine implements AiEngine {
stream: false
};
try {
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const response = await this.client.post(this.chatUrl, params);
const choices = response.data.choices;
const message = choices[0].message;


@@ -6,25 +6,27 @@ import { AiEngine, AiEngineConfig } from './Engine';
interface OllamaConfig extends AiEngineConfig {}
const DEFAULT_OLLAMA_URL = 'http://localhost:11434';
const OLLAMA_CHAT_PATH = '/api/chat';
export class OllamaEngine implements AiEngine {
config: OllamaConfig;
client: AxiosInstance;
private chatUrl: string;
constructor(config) {
this.config = config;
const baseUrl = config.baseURL || DEFAULT_OLLAMA_URL;
this.chatUrl = `${baseUrl}${OLLAMA_CHAT_PATH}`;
// Combine base headers with custom headers
const headers = {
'Content-Type': 'application/json',
...config.customHeaders
};
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:11434/api/chat',
headers
});
this.client = axios.create({ headers });
}
async generateCommitMessage(
@@ -37,10 +39,7 @@ export class OllamaEngine implements AiEngine {
stream: false
};
try {
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const response = await this.client.post(this.chatUrl, params);
const { message } = response.data;
let content = message?.content;


@@ -1,4 +1,5 @@
import { OpenAI } from 'openai';
import { HttpsProxyAgent } from 'https-proxy-agent';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { parseCustomHeaders } from '../utils/engine';
import { normalizeEngineError } from '../utils/engineErrorHandler';
@@ -23,6 +24,12 @@ export class OpenAiEngine implements AiEngine {
clientOptions.baseURL = config.baseURL;
}
const proxy =
config.proxy || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
if (proxy) {
clientOptions.httpAgent = new HttpsProxyAgent(proxy);
}
if (config.customHeaders) {
const headers = parseCustomHeaders(config.customHeaders);
if (Object.keys(headers).length > 0) {
@@ -36,12 +43,18 @@ export class OpenAiEngine implements AiEngine {
public generateCommitMessage = async (
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | null> => {
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
const params = {
model: this.config.model,
messages,
temperature: 0,
top_p: 0.1,
max_tokens: this.config.maxTokensOutput
...(isReasoningModel
? { max_completion_tokens: this.config.maxTokensOutput }
: {
temperature: 0,
top_p: 0.1,
max_tokens: this.config.maxTokensOutput
})
};
try {
@@ -55,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
)
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
const completion = await this.client.chat.completions.create(params);
const completion = await this.client.chat.completions.create(
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
);
const message = completion.choices[0].message;
let content = message?.content;


@@ -55,9 +55,7 @@ async function handleModelNotFoundError(
provider: string,
currentModel: string
): Promise<string | null> {
console.log(
chalk.red(`\n✖ Model '${currentModel}' not found\n`)
);
console.log(chalk.red(`\n✖ Model '${currentModel}' not found\n`));
const suggestedModels = getSuggestedModels(provider, currentModel);
const recommended =


@@ -47,6 +47,7 @@ export function getEngine(): AiEngine {
maxTokensOutput: config.OCO_TOKENS_MAX_OUTPUT!,
maxTokensInput: config.OCO_TOKENS_MAX_INPUT!,
baseURL: config.OCO_API_URL!,
proxy: config.OCO_PROXY!,
apiKey: config.OCO_API_KEY!,
customHeaders
};


@@ -3,15 +3,18 @@ import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
// Provider billing/help URLs for common errors
export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/settings/organization/billing',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]:
'https://console.anthropic.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.OPENAI]:
'https://platform.openai.com/settings/organization/billing',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/plan',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/billing/',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/usage',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/credits',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/billing',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
[OCO_AI_PROVIDER_ENUM.AZURE]:
'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
[OCO_AI_PROVIDER_ENUM.MLX]: null,
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
@@ -23,7 +26,9 @@ export class InsufficientCreditsError extends Error {
public readonly provider: string;
constructor(provider: string, message?: string) {
super(message || `Insufficient credits or quota for provider '${provider}'`);
super(
message || `Insufficient credits or quota for provider '${provider}'`
);
this.name = 'InsufficientCreditsError';
this.provider = provider;
}
@@ -345,7 +350,10 @@ export interface FormattedError {
}
// Format an error into a user-friendly structure
export function formatUserFriendlyError(error: unknown, provider: string): FormattedError {
export function formatUserFriendlyError(
error: unknown,
provider: string
): FormattedError {
const billingUrl = PROVIDER_BILLING_URLS[provider] || null;
// Handle our custom error types first
@@ -460,7 +468,9 @@ export function printFormattedError(formatted: FormattedError): string {
output += ` ${formatted.message}\n`;
if (formatted.helpUrl) {
output += `\n ${chalk.cyan('Help:')} ${chalk.underline(formatted.helpUrl)}\n`;
output += `\n ${chalk.cyan('Help:')} ${chalk.underline(
formatted.helpUrl
)}\n`;
}
if (formatted.suggestion) {


@@ -125,9 +125,7 @@ export async function fetchMistralModels(apiKey: string): Promise<string[]> {
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
const models = data.data?.map((m: { id: string }) => m.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.mistral;
} catch {
@@ -148,9 +146,7 @@ export async function fetchGroqModels(apiKey: string): Promise<string[]> {
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
const models = data.data?.map((m: { id: string }) => m.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.groq;
} catch {
@@ -173,8 +169,9 @@ export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
const data = await response.json();
// Filter to text-capable models only (exclude image/audio models)
const models = data.data
?.filter((m: { id: string; context_length?: number }) =>
m.context_length && m.context_length > 0
?.filter(
(m: { id: string; context_length?: number }) =>
m.context_length && m.context_length > 0
)
.map((m: { id: string }) => m.id)
.sort();
@@ -198,9 +195,7 @@ export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
const models = data.data?.map((m: { id: string }) => m.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
} catch {
@@ -312,7 +307,10 @@ export function clearModelCache(): void {
}
}
export function getCacheInfo(): { timestamp: number | null; providers: string[] } {
export function getCacheInfo(): {
timestamp: number | null;
providers: string[];
} {
const cache = readCache();
if (!cache) {
return { timestamp: null, providers: [] };

src/utils/proxy.ts (new file, 21 lines)

@@ -0,0 +1,21 @@
import { setGlobalDispatcher, ProxyAgent } from 'undici';
import axios from 'axios';
import { HttpsProxyAgent } from 'https-proxy-agent';
export function setupProxy(proxyUrl?: string) {
const proxy = proxyUrl || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
if (proxy) {
try {
// Set global dispatcher for undici (affects globalThis.fetch used by Gemini and others)
const dispatcher = new ProxyAgent(proxy);
setGlobalDispatcher(dispatcher);
// Set axios global agent
const agent = new HttpsProxyAgent(proxy);
axios.defaults.httpsAgent = agent;
axios.defaults.proxy = false; // Disable axios built-in proxy handling to use agent
} catch (error) {
console.warn(`[Proxy Error] Failed to set proxy: ${error.message}`);
}
}
}


@@ -9,6 +9,11 @@ import { rm } from 'fs';
const fsExec = promisify(exec);
const fsRemove = promisify(rm);
const waitForCommitConfirmation = async (findByText: any) => {
expect(await findByText('Generating the commit message')).toBeInTheConsole();
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
};
/**
* git remote -v
*
@@ -97,7 +102,7 @@ describe('cli flow to push git branch', () => {
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
await waitForCommitConfirmation(findByText);
userEvent.keyboard('[Enter]');
expect(
@@ -129,7 +134,7 @@ describe('cli flow to push git branch', () => {
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
await waitForCommitConfirmation(findByText);
userEvent.keyboard('[Enter]');
expect(
@@ -162,7 +167,7 @@ describe('cli flow to push git branch', () => {
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
await waitForCommitConfirmation(findByText);
userEvent.keyboard('[Enter]');
expect(
@@ -190,7 +195,7 @@ describe('cli flow to push git branch', () => {
[resolve('./out/cli.cjs')],
{ cwd: gitDir }
);
expect(await findByText('Confirm the commit message?')).toBeInTheConsole();
await waitForCommitConfirmation(findByText);
userEvent.keyboard('[Enter]');
expect(await findByText('Choose a remote to push to')).toBeInTheConsole();


@@ -3,6 +3,10 @@ import { render } from 'cli-testing-library';
import 'cli-testing-library/extend-expect';
import { prepareEnvironment, wait } from '../utils';
import path from 'path';
import { execFile } from 'child_process';
import { promisify } from 'util';
const execFileAsync = promisify(execFile);
function getAbsolutePath(relativePath: string) {
// Use process.cwd() which should be the project root during test execution
@@ -27,9 +31,10 @@ async function setupCommitlint(dir: string, ver: 9 | 18 | 19) {
configPath = getAbsolutePath('./data/commitlint_19/commitlint.config.js');
break;
}
await render('cp', ['-r', packagePath, '.'], { cwd: dir });
await render('cp', [packageJsonPath, '.'], { cwd: dir });
await render('cp', [configPath, '.'], { cwd: dir });
await execFileAsync('cp', ['-R', packagePath, path.join(dir, 'node_modules')]);
await execFileAsync('cp', [packageJsonPath, path.join(dir, 'package.json')]);
await execFileAsync('cp', [configPath, path.join(dir, 'commitlint.config.js')]);
await wait(3000); // Avoid flakiness by waiting
}

test/unit/openAi.test.ts (new file, 26 lines)

@@ -0,0 +1,26 @@
// Test the reasoning model detection regex used in OpenAiEngine.
// Integration test with the engine is not possible because mistral.ts
// uses require() which is unavailable in the ESM test environment.
const REASONING_MODEL_RE = /^(o[1-9]|gpt-5)/;
describe('OpenAiEngine reasoning model detection', () => {
it.each([
['o1', true],
['o1-preview', true],
['o1-mini', true],
['o3', true],
['o3-mini', true],
['o4-mini', true],
['gpt-5', true],
['gpt-5-nano', true],
['gpt-4o', false],
['gpt-4o-mini', false],
['gpt-4', false],
['gpt-3.5-turbo', false]
])(
'model "%s" isReasoning=%s',
(model, expected) => {
expect(REASONING_MODEL_RE.test(model)).toBe(expected);
}
);
});