Compare commits


6 Commits
dev...v3.2.12

Author | SHA1 | Message | Date
di-sukharev | 6d9fff56aa | 3.2.12 | 2026-01-17 23:46:15 +03:00
di-sukharev | 6ed70d0382 | add oco models command | 2026-01-17 23:46:04 +03:00
di-sukharev | 5b241ed2d0 | refactor: enhance error handling and normalization across AI engines | 2026-01-17 23:34:49 +03:00

    This update introduces a centralized error handling mechanism for various AI engines, improving the consistency and clarity of error messages. The new `normalizeEngineError` function standardizes error responses, allowing for better user feedback and recovery suggestions. Additionally, specific error classes for insufficient credits, rate limits, and service availability have been implemented, along with user-friendly formatting for error messages. This refactor aims to enhance the overall user experience when interacting with the AI services.

di-sukharev | 8b0ee25923 | build | 2026-01-17 23:06:18 +03:00
di-sukharev | fdd4d89bba | 3.2.11 | 2026-01-17 23:06:17 +03:00
di-sukharev | d70797b864 | feat: add interactive setup wizard and model error handling | 2026-01-17 23:04:43 +03:00

    Add comprehensive setup command with provider selection, API key configuration, and model selection. Include error recovery for model-not-found scenarios with suggested alternatives and automatic retry functionality. Update Anthropic model list with latest versions and add provider metadata for better user experience.
26 changed files with 8876 additions and 4778 deletions

README.md

@@ -201,6 +201,28 @@ or for a cheaper option:
```sh
oco config set OCO_MODEL=gpt-3.5-turbo
```
### Model Management
OpenCommit automatically fetches available models from your provider when you run `oco setup`. Models are cached for 7 days to reduce API calls.
To see available models for your current provider:
```sh
oco models
```
To refresh the model list (e.g., after new models are released):
```sh
oco models --refresh
```
To see models for a specific provider:
```sh
oco models --provider anthropic
```
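Fetched model lists are cached on disk in `~/.opencommit-models.json`; deleting that file has the same effect as `--refresh`.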
### Switch to other LLM providers with a custom URL
By default OpenCommit uses [OpenAI](https://openai.com).

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

package-lock.json (generated, 4 lines changed)

@@ -1,12 +1,12 @@
{
"name": "opencommit",
"version": "3.2.10",
"version": "3.2.12",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "opencommit",
"version": "3.2.10",
"version": "3.2.12",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.10.0",

package.json

@@ -1,6 +1,6 @@
{
"name": "opencommit",
"version": "3.2.10",
"version": "3.2.12",
"description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
"keywords": [
"git",

src/cli.ts

@@ -8,6 +8,13 @@ import { commitlintConfigCommand } from './commands/commitlint';
import { configCommand } from './commands/config';
import { hookCommand, isHookCalled } from './commands/githook.js';
import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
import {
setupCommand,
isFirstRun,
runSetup,
promptForMissingApiKey
} from './commands/setup';
import { modelsCommand } from './commands/models';
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
import { runMigrations } from './migrations/_run.js';
@@ -17,7 +24,7 @@ cli(
{
version: packageJSON.version,
name: 'opencommit',
- commands: [configCommand, hookCommand, commitlintConfigCommand],
+ commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
flags: {
fgm: {
type: Boolean,
@@ -47,6 +54,20 @@ cli(
if (await isHookCalled()) {
prepareCommitMessageHook();
} else {
// Check for first run and trigger setup wizard
if (isFirstRun()) {
const setupComplete = await runSetup();
if (!setupComplete) {
process.exit(1);
}
}
// Check for missing API key and prompt if needed
const hasApiKey = await promptForMissingApiKey();
if (!hasApiKey) {
process.exit(1);
}
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
}
},

src/commands/ENUMS.ts

@@ -1,5 +1,7 @@
export enum COMMANDS {
config = 'config',
hook = 'hook',
- commitlint = 'commitlint'
+ commitlint = 'commitlint',
+ setup = 'setup',
+ models = 'models'
}

src/commands/commit.ts

@@ -11,6 +11,10 @@ import {
import chalk from 'chalk';
import { execa } from 'execa';
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
import {
formatUserFriendlyError,
printFormattedError
} from '../utils/errors';
import {
assertGitRepo,
getChangedFiles,
@@ -211,10 +215,11 @@ ${chalk.grey('——————————————————')}`
`${chalk.red('✖')} Failed to generate the commit message`
);
console.log(error);
- const err = error as Error;
- outro(`${chalk.red('✖')} ${err?.message || err}`);
+ const errorConfig = getConfig();
+ const provider = errorConfig.OCO_AI_PROVIDER || 'openai';
+ const formatted = formatUserFriendlyError(error, provider);
+ outro(printFormattedError(formatted));
process.exit(1);
}
};

src/commands/config.ts

@@ -68,10 +68,11 @@ export const MODEL_LIST = {
],
anthropic: [
'claude-3-5-sonnet-20240620',
- 'claude-3-opus-20240229',
- 'claude-3-sonnet-20240229',
- 'claude-3-haiku-20240307'
+ 'claude-sonnet-4-20250514',
+ 'claude-opus-4-20250514',
+ 'claude-3-7-sonnet-20250219',
+ 'claude-3-5-sonnet-20241022',
+ 'claude-3-5-haiku-20241022'
],
gemini: [
@@ -846,6 +847,33 @@ export enum OCO_AI_PROVIDER_ENUM {
OPENROUTER = 'openrouter'
}
export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/api-keys',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/keys',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/apikey',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/keys',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/api-keys/',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/api_keys',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/keys',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/keys',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
[OCO_AI_PROVIDER_ENUM.MLX]: null,
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
[OCO_AI_PROVIDER_ENUM.TEST]: null
};
export const RECOMMENDED_MODELS: Record<string, string> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'gpt-4o-mini',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'claude-sonnet-4-20250514',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'gemini-1.5-flash',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'llama3-70b-8192',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'mistral-small-latest',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'deepseek-chat',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'openai/gpt-4o-mini',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini'
}
export type ConfigType = {
[CONFIG_KEYS.OCO_API_KEY]?: string;
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;

src/commands/models.ts (new file, 144 lines)

@@ -0,0 +1,144 @@
import { intro, outro, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
MODEL_LIST,
OCO_AI_PROVIDER_ENUM,
getConfig
} from './config';
import {
fetchModelsForProvider,
clearModelCache,
getCacheInfo,
getCachedModels
} from '../utils/modelCache';
function formatCacheAge(timestamp: number | null): string {
if (!timestamp) return 'never';
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1000 * 60 * 60));
const minutes = Math.floor(ageMs / (1000 * 60));
if (days > 0) {
return `${days} day${days === 1 ? '' : 's'} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
} else if (minutes > 0) {
return `${minutes} minute${minutes === 1 ? '' : 's'} ago`;
}
return 'just now';
}
async function listModels(provider: string, useCache: boolean = true): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const currentModel = config.OCO_MODEL;
// Get cached models or fetch new ones
let models: string[] = [];
if (useCache) {
const cached = getCachedModels(provider);
if (cached) {
models = cached;
}
}
if (models.length === 0) {
// Fallback to hardcoded list
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
models = MODEL_LIST[providerKey] || [];
}
console.log(`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`);
if (models.length === 0) {
console.log(chalk.dim(' No models found'));
} else {
models.forEach((model) => {
const isCurrent = model === currentModel;
const prefix = isCurrent ? chalk.green('* ') : ' ';
const label = isCurrent ? chalk.green(model) : model;
console.log(`${prefix}${label}`);
});
}
console.log('');
}
async function refreshModels(provider: string): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const loadingSpinner = spinner();
loadingSpinner.start(`Fetching models from ${provider}...`);
// Clear cache first
clearModelCache();
try {
const models = await fetchModelsForProvider(provider, apiKey, undefined, true);
loadingSpinner.stop(`${chalk.green('+')} Fetched ${models.length} models`);
// List the models
await listModels(provider, true);
} catch (error) {
loadingSpinner.stop(chalk.red('Failed to fetch models'));
console.error(chalk.red(`Error: ${error instanceof Error ? error.message : 'Unknown error'}`));
}
}
export const modelsCommand = command(
{
name: COMMANDS.models,
help: {
description: 'List and manage cached models for your AI provider'
},
flags: {
refresh: {
type: Boolean,
alias: 'r',
description: 'Clear cache and re-fetch models from the provider',
default: false
},
provider: {
type: String,
alias: 'p',
description: 'Specify provider (defaults to current OCO_AI_PROVIDER)'
}
}
},
async ({ flags }) => {
const config = getConfig();
const provider = flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
intro(chalk.bgCyan(' OpenCommit Models '));
// Show cache info
const cacheInfo = getCacheInfo();
if (cacheInfo.timestamp) {
console.log(
chalk.dim(` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`)
);
if (cacheInfo.providers.length > 0) {
console.log(
chalk.dim(` Cached providers: ${cacheInfo.providers.join(', ')}`)
);
}
} else {
console.log(chalk.dim(' No cached models'));
}
if (flags.refresh) {
await refreshModels(provider);
} else {
await listModels(provider);
}
outro(
`Run ${chalk.cyan('oco models --refresh')} to update the model list`
);
}
);

src/commands/setup.ts (new file, 480 lines)

@@ -0,0 +1,480 @@
import { intro, outro, select, text, isCancel, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
CONFIG_KEYS,
MODEL_LIST,
OCO_AI_PROVIDER_ENUM,
getConfig,
setGlobalConfig,
getGlobalConfig,
getIsGlobalConfigFileExist,
DEFAULT_CONFIG,
PROVIDER_API_KEY_URLS,
RECOMMENDED_MODELS
} from './config';
import {
fetchModelsForProvider,
fetchOllamaModels,
getCacheInfo
} from '../utils/modelCache';
const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'OpenAI (GPT-4o, GPT-4)',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'Anthropic (Claude Sonnet, Opus)',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: 'Ollama (Free, runs locally)',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'Google Gemini',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'Groq (Fast inference, free tier)',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'Mistral AI',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'DeepSeek',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'OpenRouter (Multiple providers)',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'AI/ML API',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'Azure OpenAI',
[OCO_AI_PROVIDER_ENUM.MLX]: 'MLX (Apple Silicon, local)'
};
const PRIMARY_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OPENAI,
OCO_AI_PROVIDER_ENUM.ANTHROPIC,
OCO_AI_PROVIDER_ENUM.OLLAMA
];
const OTHER_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.GEMINI,
OCO_AI_PROVIDER_ENUM.GROQ,
OCO_AI_PROVIDER_ENUM.MISTRAL,
OCO_AI_PROVIDER_ENUM.DEEPSEEK,
OCO_AI_PROVIDER_ENUM.OPENROUTER,
OCO_AI_PROVIDER_ENUM.AIMLAPI,
OCO_AI_PROVIDER_ENUM.AZURE,
OCO_AI_PROVIDER_ENUM.MLX
];
const NO_API_KEY_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OLLAMA,
OCO_AI_PROVIDER_ENUM.MLX
];
async function selectProvider(): Promise<string | symbol> {
const primaryOptions = PRIMARY_PROVIDERS.map((provider) => ({
value: provider,
label: PROVIDER_DISPLAY_NAMES[provider] || provider
}));
primaryOptions.push({
value: 'other',
label: 'Other providers...'
});
const selection = await select({
message: 'Select your AI provider:',
options: primaryOptions
});
if (isCancel(selection)) return selection;
if (selection === 'other') {
const otherOptions = OTHER_PROVIDERS.map((provider) => ({
value: provider,
label: PROVIDER_DISPLAY_NAMES[provider] || provider
}));
return await select({
message: 'Select provider:',
options: otherOptions
});
}
return selection;
}
async function getApiKey(provider: string): Promise<string | symbol> {
const url = PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];
let message = `Enter your ${provider} API key:`;
if (url) {
message = `Enter your API key:\n${chalk.dim(` Get your key at: ${url}`)}`;
}
return await text({
message,
placeholder: 'sk-...',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'API key is required';
}
return undefined;
}
});
}
function formatCacheAge(timestamp: number | null): string {
if (!timestamp) return '';
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1000 * 60 * 60));
if (days > 0) {
return `${days} day${days === 1 ? '' : 's'} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
}
return 'just now';
}
async function selectModel(
provider: string,
apiKey?: string
): Promise<string | symbol> {
const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
const loadingSpinner = spinner();
loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
let models: string[] = [];
let usedFallback = false;
try {
models = await fetchModelsForProvider(provider, apiKey);
} catch {
// Fall back to hardcoded list
usedFallback = true;
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
models = MODEL_LIST[providerKey] || [];
}
// Check cache info for display
const cacheInfo = getCacheInfo();
const cacheAge = formatCacheAge(cacheInfo.timestamp);
if (usedFallback) {
loadingSpinner.stop(
chalk.yellow('Could not fetch models from API. Using default list.')
);
} else if (cacheAge) {
loadingSpinner.stop(`Models loaded ${chalk.dim(`(cached ${cacheAge})`)}`);
} else {
loadingSpinner.stop('Models loaded');
}
if (models.length === 0) {
// For Ollama/MLX, prompt for manual entry
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
return await text({
message: 'Enter model name (e.g., llama3:8b, mistral):',
placeholder: 'llama3:8b',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
}
// Use default from config
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
return MODEL_LIST[providerKey]?.[0] || 'gpt-4o-mini';
}
// Get recommended model for this provider
const recommended = RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
// Build options with recommended first
const options: Array<{ value: string; label: string }> = [];
if (recommended && models.includes(recommended)) {
options.push({
value: recommended,
label: `${recommended} (Recommended)`
});
}
// Add other models (first 10, excluding recommended)
const otherModels = models
.filter((m) => m !== recommended)
.slice(0, 10);
otherModels.forEach((model) => {
options.push({ value: model, label: model });
});
// Add option to see all or enter custom
if (models.length > 11) {
options.push({ value: '__show_all__', label: 'Show all models...' });
}
options.push({ value: '__custom__', label: 'Enter custom model...' });
const selection = await select({
message: 'Select a model:',
options
});
if (isCancel(selection)) return selection;
if (selection === '__show_all__') {
const allOptions = models.map((model) => ({
value: model,
label: model === recommended ? `${model} (Recommended)` : model
}));
return await select({
message: 'Select a model:',
options: allOptions
});
}
if (selection === '__custom__') {
return await text({
message: 'Enter model name:',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
}
return selection;
}
async function setupOllama(): Promise<{
provider: string;
model: string;
apiUrl: string;
} | null> {
console.log(chalk.cyan('\n Ollama - Free Local AI\n'));
console.log(chalk.dim(' Setup steps:'));
console.log(chalk.dim(' 1. Install: https://ollama.ai/download'));
console.log(chalk.dim(' 2. Pull a model: ollama pull llama3:8b'));
console.log(chalk.dim(' 3. Start server: ollama serve\n'));
// Try to fetch available models
const loadingSpinner = spinner();
loadingSpinner.start('Checking for local Ollama installation...');
const defaultUrl = 'http://localhost:11434';
let ollamaModels: string[] = [];
try {
ollamaModels = await fetchOllamaModels(defaultUrl);
if (ollamaModels.length > 0) {
loadingSpinner.stop(
`${chalk.green('✔')} Found ${ollamaModels.length} local model(s)`
);
} else {
loadingSpinner.stop(
chalk.yellow(
'Ollama is running but no models found. Pull a model first: ollama pull llama3:8b'
)
);
}
} catch {
loadingSpinner.stop(
chalk.yellow(
'Could not connect to Ollama. Make sure it is running: ollama serve'
)
);
}
// Model selection
let model: string | symbol;
if (ollamaModels.length > 0) {
model = await select({
message: 'Select a model:',
options: [
...ollamaModels.map((m) => ({ value: m, label: m })),
{ value: '__custom__', label: 'Enter custom model name...' }
]
});
if (isCancel(model)) return null;
if (model === '__custom__') {
model = await text({
message: 'Enter model name (e.g., llama3:8b, mistral):',
placeholder: 'llama3:8b'
});
}
} else {
model = await text({
message: 'Enter model name (e.g., llama3:8b, mistral):',
placeholder: 'llama3:8b',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
}
if (isCancel(model)) return null;
// API URL (optional)
const apiUrl = await text({
message: 'Ollama URL (press Enter for default):',
placeholder: defaultUrl,
defaultValue: defaultUrl
});
if (isCancel(apiUrl)) return null;
return {
provider: OCO_AI_PROVIDER_ENUM.OLLAMA,
model: model as string,
apiUrl: (apiUrl as string) || defaultUrl
};
}
export async function runSetup(): Promise<boolean> {
intro(chalk.bgCyan(' Welcome to OpenCommit! '));
// Select provider
const provider = await selectProvider();
if (isCancel(provider)) {
outro('Setup cancelled');
return false;
}
let config: Partial<Record<string, any>> = {};
// Handle Ollama specially
if (provider === OCO_AI_PROVIDER_ENUM.OLLAMA) {
const ollamaConfig = await setupOllama();
if (!ollamaConfig) {
outro('Setup cancelled');
return false;
}
config = {
OCO_AI_PROVIDER: ollamaConfig.provider,
OCO_MODEL: ollamaConfig.model,
OCO_API_URL: ollamaConfig.apiUrl,
OCO_API_KEY: 'ollama' // Placeholder
};
} else if (provider === OCO_AI_PROVIDER_ENUM.MLX) {
// MLX setup
console.log(chalk.cyan('\n MLX - Apple Silicon Local AI\n'));
console.log(chalk.dim(' MLX runs locally on Apple Silicon Macs.'));
console.log(chalk.dim(' No API key required.\n'));
const model = await text({
message: 'Enter model name:',
placeholder: 'mlx-community/Llama-3-8B-Instruct-4bit'
});
if (isCancel(model)) {
outro('Setup cancelled');
return false;
}
config = {
OCO_AI_PROVIDER: OCO_AI_PROVIDER_ENUM.MLX,
OCO_MODEL: model,
OCO_API_KEY: 'mlx' // Placeholder
};
} else {
// Standard provider flow: API key then model
const apiKey = await getApiKey(provider as string);
if (isCancel(apiKey)) {
outro('Setup cancelled');
return false;
}
const model = await selectModel(provider as string, apiKey as string);
if (isCancel(model)) {
outro('Setup cancelled');
return false;
}
config = {
OCO_AI_PROVIDER: provider,
OCO_API_KEY: apiKey,
OCO_MODEL: model
};
}
// Save configuration
const existingConfig = getIsGlobalConfigFileExist()
? getGlobalConfig()
: DEFAULT_CONFIG;
const newConfig = {
...existingConfig,
...config
};
setGlobalConfig(newConfig as any);
outro(
`${chalk.green('✔')} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan('oco')} to generate commit messages!`
);
return true;
}
export function isFirstRun(): boolean {
if (!getIsGlobalConfigFileExist()) {
return true;
}
const config = getConfig();
// Check if API key is missing for providers that need it
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
// For Ollama/MLX, check if model is set
return !config.OCO_MODEL;
}
// For other providers, check if API key is set
return !config.OCO_API_KEY;
}
export async function promptForMissingApiKey(): Promise<boolean> {
const config = getConfig();
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
return true; // No API key needed
}
if (config.OCO_API_KEY) {
return true; // Already has key
}
console.log(
chalk.yellow(
`\nAPI key missing for ${provider}. Let's set it up.\n`
)
);
const apiKey = await getApiKey(provider);
if (isCancel(apiKey)) {
return false;
}
const existingConfig = getGlobalConfig();
setGlobalConfig({
...existingConfig,
OCO_API_KEY: apiKey as string
} as any);
console.log(chalk.green('✔') + ' API key saved\n');
return true;
}
export const setupCommand = command(
{
name: COMMANDS.setup,
help: {
description: 'Interactive setup wizard for OpenCommit'
}
},
async () => {
await runSetup();
}
);

src/engine/aimlApi.ts

@@ -1,5 +1,6 @@
import OpenAI from 'openai';
import axios, { AxiosInstance } from 'axios';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { AiEngine, AiEngineConfig } from './Engine';
interface AimlApiConfig extends AiEngineConfig {}
@@ -32,16 +33,7 @@ export class AimlApiEngine implements AiEngine {
const message = response.data.choices?.[0]?.message;
return message?.content ?? null;
} catch (error) {
- const err = error as Error;
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const apiError = error.response.data.error;
-   if (apiError) throw new Error(apiError.message);
- }
- throw err;
+ throw normalizeEngineError(error, 'aimlapi', this.config.model);
}
};
}

src/engine/anthropic.ts

@@ -3,11 +3,9 @@ import {
MessageCreateParamsNonStreaming,
MessageParam
} from '@anthropic-ai/sdk/resources/messages.mjs';
- import { outro } from '@clack/prompts';
import axios from 'axios';
- import chalk from 'chalk';
import { OpenAI } from 'openai';
- import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -58,22 +56,7 @@ export class AnthropicEngine implements AiEngine {
let content = message;
return removeContentTags(content, 'think');
} catch (error) {
- const err = error as Error;
- outro(`${chalk.red('✖')} ${err?.message || err}`);
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const anthropicAiError = error.response.data.error;
-   if (anthropicAiError?.message) outro(anthropicAiError.message);
-   outro(
-     'For help look into README https://github.com/di-sukharev/opencommit#setup'
-   );
- }
- throw err;
+ throw normalizeEngineError(error, 'anthropic', this.config.model);
}
};
}

src/engine/azure.ts

@@ -2,11 +2,9 @@ import {
AzureKeyCredential,
OpenAIClient as AzureOpenAIClient
} from '@azure/openai';
- import { outro } from '@clack/prompts';
import axios from 'axios';
- import chalk from 'chalk';
import { OpenAI } from 'openai';
- import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -57,24 +55,7 @@ export class AzureEngine implements AiEngine {
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
- outro(`${chalk.red('✖')} ${this.config.model}`);
- const err = error as Error;
- outro(`${chalk.red('✖')} ${JSON.stringify(error)}`);
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const openAiError = error.response.data.error;
-   if (openAiError?.message) outro(openAiError.message);
-   outro(
-     'For help look into README https://github.com/di-sukharev/opencommit#setup'
-   );
- }
- throw err;
+ throw normalizeEngineError(error, 'azure', this.config.model);
}
};
}

src/engine/deepseek.ts

@@ -1,6 +1,6 @@
import axios from 'axios';
import { OpenAI } from 'openai';
- import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { OpenAiEngine, OpenAiConfig } from './openAi';
@@ -45,17 +45,7 @@ export class DeepseekEngine extends OpenAiEngine {
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
- const err = error as Error;
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const openAiError = error.response.data.error;
-   if (openAiError) throw new Error(openAiError.message);
- }
- throw err;
+ throw normalizeEngineError(error, 'deepseek', this.config.model);
}
};
}

src/engine/flowise.ts

@@ -1,5 +1,6 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -39,9 +40,8 @@ export class FlowiseEngine implements AiEngine {
const message = response.data;
let content = message?.text;
return removeContentTags(content, 'think');
- } catch (err: any) {
-   const message = err.response?.data?.error ?? err.message;
-   throw new Error('local model issues. details: ' + message);
+ } catch (error) {
+   throw normalizeEngineError(error, 'flowise', this.config.model);
}
}
}

src/engine/gemini.ts

@@ -5,8 +5,8 @@ import {
HarmCategory,
Part
} from '@google/generative-ai';
- import axios from 'axios';
import { OpenAI } from 'openai';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -75,16 +75,7 @@ export class GeminiEngine implements AiEngine {
const content = result.response.text();
return removeContentTags(content, 'think');
} catch (error) {
- const err = error as Error;
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const geminiError = error.response.data.error;
-   if (geminiError) throw new Error(geminiError?.message);
- }
- throw err;
+ throw normalizeEngineError(error, 'gemini', this.config.model);
}
}
}

src/engine/mistral.ts

@@ -1,6 +1,6 @@
import axios from 'axios';
import { OpenAI } from 'openai';
- import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -63,17 +63,7 @@ export class MistralAiEngine implements AiEngine {
let content = message.content as string;
return removeContentTags(content, 'think');
} catch (error) {
- const err = error as Error;
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const mistralError = error.response.data.error;
-   if (mistralError) throw new Error(mistralError.message);
- }
- throw err;
+ throw normalizeEngineError(error, 'mistral', this.config.model);
}
};
}

src/engine/mlx.ts

@@ -1,5 +1,6 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -39,9 +40,8 @@ export class MLXEngine implements AiEngine {
const message = choices[0].message;
let content = message?.content;
return removeContentTags(content, 'think');
- } catch (err: any) {
-   const message = err.response?.data?.error ?? err.message;
-   throw new Error(`MLX provider error: ${message}`);
+ } catch (error) {
+   throw normalizeEngineError(error, 'mlx', this.config.model);
}
}
}

src/engine/ollama.ts

@@ -1,5 +1,6 @@
import axios, { AxiosInstance } from 'axios';
import { OpenAI } from 'openai';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -44,9 +45,8 @@ export class OllamaEngine implements AiEngine {
const { message } = response.data;
let content = message?.content;
return removeContentTags(content, 'think');
- } catch (err: any) {
-   const message = err.response?.data?.error ?? err.message;
-   throw new Error(`Ollama provider error: ${message}`);
+ } catch (error) {
+   throw normalizeEngineError(error, 'ollama', this.config.model);
}
}
}

src/engine/openAi.ts

@@ -1,7 +1,7 @@
import axios from 'axios';
import { OpenAI } from 'openai';
- import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { parseCustomHeaders } from '../utils/engine';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
import { tokenCount } from '../utils/tokenCount';
import { AiEngine, AiEngineConfig } from './Engine';
@@ -61,17 +61,7 @@ export class OpenAiEngine implements AiEngine {
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
- const err = error as Error;
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const openAiError = error.response.data.error;
-   if (openAiError) throw new Error(openAiError.message);
- }
- throw err;
+ throw normalizeEngineError(error, 'openai', this.config.model);
}
};
}

src/engine/openRouter.ts

@@ -1,7 +1,8 @@
import OpenAI from 'openai';
- import { AiEngine, AiEngineConfig } from './Engine';
import axios, { AxiosInstance } from 'axios';
+ import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';
+ import { AiEngine, AiEngineConfig } from './Engine';
interface OpenRouterConfig extends AiEngineConfig {}
@@ -33,17 +34,7 @@ export class OpenRouterEngine implements AiEngine {
let content = message?.content;
return removeContentTags(content, 'think');
} catch (error) {
- const err = error as Error;
- if (
-   axios.isAxiosError<{ error?: { message: string } }>(error) &&
-   error.response?.status === 401
- ) {
-   const openRouterError = error.response.data.error;
-   if (openRouterError) throw new Error(openRouterError.message);
- }
- throw err;
+ throw normalizeEngineError(error, 'openrouter', this.config.model);
}
};
}

src/generateCommitMessageFromGitDiff.ts

@@ -1,7 +1,21 @@
import { select, confirm, isCancel } from '@clack/prompts';
import chalk from 'chalk';
import { OpenAI } from 'openai';
- import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config';
+ import {
+   DEFAULT_TOKEN_LIMITS,
+   getConfig,
+   setGlobalConfig,
+   getGlobalConfig,
+   MODEL_LIST,
+   RECOMMENDED_MODELS
+ } from './commands/config';
import { getMainCommitPrompt } from './prompts';
import { getEngine } from './utils/engine';
+ import {
+   isModelNotFoundError,
+   getSuggestedModels,
+   ModelNotFoundError
+ } from './utils/errors';
import { mergeDiffs } from './utils/mergeDiffs';
import { tokenCount } from './utils/tokenCount';
@@ -36,13 +50,106 @@ export enum GenerateCommitMessageErrorEnum {
outputTokensTooHigh = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT} tokens.`
}
async function handleModelNotFoundError(
error: Error,
provider: string,
currentModel: string
): Promise<string | null> {
console.log(
chalk.red(`\n✖ Model '${currentModel}' not found\n`)
);
const suggestedModels = getSuggestedModels(provider, currentModel);
const recommended =
RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
if (suggestedModels.length === 0) {
console.log(
chalk.yellow(
`No alternative models available. Run 'oco setup' to configure a different model.`
)
);
return null;
}
const options: Array<{ value: string; label: string }> = [];
// Add recommended first if available
if (recommended && suggestedModels.includes(recommended)) {
options.push({
value: recommended,
label: `${recommended} (Recommended)`
});
}
// Add other suggestions
suggestedModels
.filter((m) => m !== recommended)
.forEach((model) => {
options.push({ value: model, label: model });
});
options.push({ value: '__custom__', label: 'Enter custom model...' });
const selection = await select({
message: 'Select an alternative model:',
options
});
if (isCancel(selection)) {
return null;
}
let newModel: string;
if (selection === '__custom__') {
const { text } = await import('@clack/prompts');
const customModel = await text({
message: 'Enter model name:',
validate: (value) => {
if (!value || value.trim().length === 0) {
return 'Model name is required';
}
return undefined;
}
});
if (isCancel(customModel)) {
return null;
}
newModel = customModel as string;
} else {
newModel = selection as string;
}
// Ask if user wants to save as default
const saveAsDefault = await confirm({
message: 'Save as default model?'
});
if (!isCancel(saveAsDefault) && saveAsDefault) {
const existingConfig = getGlobalConfig();
setGlobalConfig({
...existingConfig,
OCO_MODEL: newModel
} as any);
console.log(chalk.green('✔') + ' Model saved as default\n');
}
return newModel;
}
const ADJUSTMENT_FACTOR = 20;
export const generateCommitMessageByDiff = async (
diff: string,
fullGitMojiSpec: boolean = false,
- context: string = ''
+ context: string = '',
+ retryWithModel?: string
): Promise<string> => {
const currentConfig = getConfig();
const provider = currentConfig.OCO_AI_PROVIDER || 'openai';
const currentModel = retryWithModel || currentConfig.OCO_MODEL;
try {
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(
fullGitMojiSpec,
@@ -89,6 +196,32 @@ export const generateCommitMessageByDiff = async (
return commitMessage;
} catch (error) {
// Handle model-not-found errors with interactive recovery
if (isModelNotFoundError(error)) {
const newModel = await handleModelNotFoundError(
error as Error,
provider,
currentModel
);
if (newModel) {
console.log(chalk.cyan(`Retrying with ${newModel}...\n`));
// Retry with the new model by updating config temporarily
const existingConfig = getGlobalConfig();
setGlobalConfig({
...existingConfig,
OCO_MODEL: newModel
} as any);
return generateCommitMessageByDiff(
diff,
fullGitMojiSpec,
context,
newModel
);
}
}
throw error;
}
};

src/utils/engineErrorHandler.ts (new file, 205 lines)

@@ -0,0 +1,205 @@
import axios from 'axios';
import {
AuthenticationError,
InsufficientCreditsError,
ModelNotFoundError,
RateLimitError,
ServiceUnavailableError
} from './errors';
/**
* Extracts HTTP status code from various error types
*/
function getStatusCode(error: unknown): number | null {
// Direct status property (common in API SDKs)
if (typeof (error as any)?.status === 'number') {
return (error as any).status;
}
// Axios-style errors
if (axios.isAxiosError(error)) {
return error.response?.status ?? null;
}
// Response object with status
if (typeof (error as any)?.response?.status === 'number') {
return (error as any).response.status;
}
return null;
}
/**
* Extracts retry-after value from error headers (for rate limiting)
*/
function getRetryAfter(error: unknown): number | undefined {
const headers = (error as any)?.response?.headers;
if (headers) {
const retryAfter = headers['retry-after'] || headers['Retry-After'];
if (retryAfter) {
const seconds = parseInt(retryAfter, 10);
if (!isNaN(seconds)) {
return seconds;
}
}
}
return undefined;
}
/**
* Extracts the error message from various error structures
*/
function extractErrorMessage(error: unknown): string {
if (error instanceof Error) {
return error.message;
}
// API error response structures
const apiError = (error as any)?.response?.data?.error;
if (apiError) {
if (typeof apiError === 'string') {
return apiError;
}
if (apiError.message) {
return apiError.message;
}
}
// Direct error data
const errorData = (error as any)?.error;
if (errorData) {
if (typeof errorData === 'string') {
return errorData;
}
if (errorData.message) {
return errorData.message;
}
}
// Fallback
if (typeof error === 'string') {
return error;
}
return 'An unknown error occurred';
}
/**
* Checks if the error message indicates a model not found error
*/
function isModelNotFoundMessage(message: string): boolean {
const lowerMessage = message.toLowerCase();
return (
(lowerMessage.includes('model') &&
(lowerMessage.includes('not found') ||
lowerMessage.includes('does not exist') ||
lowerMessage.includes('invalid') ||
lowerMessage.includes('pull'))) ||
lowerMessage.includes('does_not_exist')
);
}
/**
* Checks if the error message indicates insufficient credits
*/
function isInsufficientCreditsMessage(message: string): boolean {
const lowerMessage = message.toLowerCase();
return (
lowerMessage.includes('insufficient') ||
lowerMessage.includes('credit') ||
lowerMessage.includes('quota') ||
lowerMessage.includes('balance too low') ||
lowerMessage.includes('billing') ||
lowerMessage.includes('payment required') ||
lowerMessage.includes('exceeded')
);
}
/**
* Normalizes raw API errors into typed error classes.
* This provides consistent error handling across all engine implementations.
*
* @param error - The raw error from the API call
* @param provider - The AI provider name (e.g., 'openai', 'anthropic')
* @param model - The model being used
* @returns A typed Error instance
*/
export function normalizeEngineError(
error: unknown,
provider: string,
model: string
): Error {
// If it's already one of our custom errors, return as-is
if (
error instanceof ModelNotFoundError ||
error instanceof AuthenticationError ||
error instanceof InsufficientCreditsError ||
error instanceof RateLimitError ||
error instanceof ServiceUnavailableError
) {
return error;
}
const statusCode = getStatusCode(error);
const message = extractErrorMessage(error);
// Handle based on HTTP status codes
switch (statusCode) {
case 401:
return new AuthenticationError(provider, message);
case 402:
return new InsufficientCreditsError(provider, message);
case 404:
// Could be model not found or endpoint not found
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
// Return generic error for other 404s
return error instanceof Error ? error : new Error(message);
case 429:
const retryAfter = getRetryAfter(error);
return new RateLimitError(provider, retryAfter, message);
case 500:
case 502:
case 503:
case 504:
return new ServiceUnavailableError(provider, statusCode, message);
}
// Handle based on error message content
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
if (isInsufficientCreditsMessage(message)) {
return new InsufficientCreditsError(provider, message);
}
// Check for rate limit patterns in message
const lowerMessage = message.toLowerCase();
if (
lowerMessage.includes('rate limit') ||
lowerMessage.includes('rate_limit') ||
lowerMessage.includes('too many requests')
) {
return new RateLimitError(provider, undefined, message);
}
// Check for auth patterns in message
if (
lowerMessage.includes('unauthorized') ||
lowerMessage.includes('api key') ||
lowerMessage.includes('apikey') ||
lowerMessage.includes('authentication') ||
lowerMessage.includes('invalid_api_key')
) {
return new AuthenticationError(provider, message);
}
// Return original error or wrap in Error if needed
return error instanceof Error ? error : new Error(message);
}
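Taken together with `src/utils/errors.ts` below, the intended flow is: engines throw typed errors, callers render them. Here is a minimal sketch, not part of the diff; it assumes a file living next to the two utilities so the relative imports resolve, and `callProvider` is a hypothetical stand-in for a real SDK call:

```ts
import { normalizeEngineError } from './engineErrorHandler';
import { formatUserFriendlyError, printFormattedError } from './errors';

// Hypothetical stand-in for a provider SDK call; it shapes the failure like an
// axios-style 429 response so the normalizer's status/header probing applies.
async function callProvider(): Promise<string> {
  const err = new Error('Rate limit exceeded') as Error & {
    response?: { status: number; headers: Record<string, string> };
  };
  err.response = { status: 429, headers: { 'retry-after': '30' } };
  throw err;
}

async function demo(): Promise<void> {
  try {
    console.log(await callProvider());
  } catch (error) {
    // Engines rethrow a typed error (here: RateLimitError with retryAfter = 30)...
    const typed = normalizeEngineError(error, 'openai', 'gpt-4o-mini');
    // ...and the top-level caller renders it, as src/commands/commit.ts now does.
    const formatted = formatUserFriendlyError(typed, 'openai');
    console.log(printFormattedError(formatted));
  }
}

demo();
```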

src/utils/errors.ts (new file, 471 lines)

@@ -0,0 +1,471 @@
import chalk from 'chalk';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
// Provider billing/help URLs for common errors
export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/settings/organization/billing',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/plan',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/billing/',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/usage',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/credits',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/billing',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
[OCO_AI_PROVIDER_ENUM.MLX]: null,
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
[OCO_AI_PROVIDER_ENUM.TEST]: null
};
// Error type for insufficient credits/quota
export class InsufficientCreditsError extends Error {
public readonly provider: string;
constructor(provider: string, message?: string) {
super(message || `Insufficient credits or quota for provider '${provider}'`);
this.name = 'InsufficientCreditsError';
this.provider = provider;
}
}
// Error type for rate limiting (429 errors)
export class RateLimitError extends Error {
public readonly provider: string;
public readonly retryAfter?: number;
constructor(provider: string, retryAfter?: number, message?: string) {
super(message || `Rate limit exceeded for provider '${provider}'`);
this.name = 'RateLimitError';
this.provider = provider;
this.retryAfter = retryAfter;
}
}
// Error type for service unavailable (5xx errors)
export class ServiceUnavailableError extends Error {
public readonly provider: string;
public readonly statusCode: number;
constructor(provider: string, statusCode: number = 503, message?: string) {
super(message || `Service unavailable for provider '${provider}'`);
this.name = 'ServiceUnavailableError';
this.provider = provider;
this.statusCode = statusCode;
}
}
// Error type for authentication failures
export class AuthenticationError extends Error {
public readonly provider: string;
constructor(provider: string, message?: string) {
super(message || `Authentication failed for provider '${provider}'`);
this.name = 'AuthenticationError';
this.provider = provider;
}
}
export class ModelNotFoundError extends Error {
public readonly modelName: string;
public readonly provider: string;
public readonly statusCode: number;
constructor(modelName: string, provider: string, statusCode: number = 404) {
super(`Model '${modelName}' not found for provider '${provider}'`);
this.name = 'ModelNotFoundError';
this.modelName = modelName;
this.provider = provider;
this.statusCode = statusCode;
}
}
export class ApiKeyMissingError extends Error {
public readonly provider: string;
constructor(provider: string) {
super(`API key is missing for provider '${provider}'`);
this.name = 'ApiKeyMissingError';
this.provider = provider;
}
}
export function isModelNotFoundError(error: unknown): boolean {
if (error instanceof ModelNotFoundError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// OpenAI error patterns
if (
message.includes('model') &&
(message.includes('not found') ||
message.includes('does not exist') ||
message.includes('invalid model'))
) {
return true;
}
// Anthropic error patterns
if (
message.includes('model') &&
(message.includes('not found') || message.includes('invalid'))
) {
return true;
}
// Check for 404 status in axios/fetch errors
if (
'status' in (error as any) &&
(error as any).status === 404 &&
message.includes('model')
) {
return true;
}
// Check for response status
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 404) {
return true;
}
}
}
return false;
}
export function isApiKeyError(error: unknown): boolean {
if (error instanceof ApiKeyMissingError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common API key error patterns
if (
message.includes('api key') ||
message.includes('apikey') ||
message.includes('authentication') ||
message.includes('unauthorized') ||
message.includes('invalid_api_key') ||
message.includes('incorrect api key')
) {
return true;
}
// Check for 401 status
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 401) {
return true;
}
}
}
return false;
}
export function getSuggestedModels(
provider: string,
failedModel: string
): string[] {
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
const models = MODEL_LIST[providerKey];
if (!models || !Array.isArray(models)) {
return [];
}
// Return first 5 models as suggestions, excluding the failed one
return models.filter((m) => m !== failedModel).slice(0, 5);
}
export function getRecommendedModel(provider: string): string | null {
switch (provider.toLowerCase()) {
case OCO_AI_PROVIDER_ENUM.OPENAI:
return 'gpt-4o-mini';
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
return 'claude-sonnet-4-20250514';
case OCO_AI_PROVIDER_ENUM.GEMINI:
return 'gemini-1.5-flash';
case OCO_AI_PROVIDER_ENUM.GROQ:
return 'llama3-70b-8192';
case OCO_AI_PROVIDER_ENUM.MISTRAL:
return 'mistral-small-latest';
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
return 'deepseek-chat';
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
return 'openai/gpt-4o-mini';
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
return 'gpt-4o-mini';
default:
return null;
}
}
export function formatErrorWithRecovery(
error: Error,
provider: string,
model: string
): string {
const suggestions = getSuggestedModels(provider, model);
const recommended = getRecommendedModel(provider);
let message = `\n${error.message}\n`;
if (suggestions.length > 0) {
message += '\nSuggested alternatives:\n';
suggestions.forEach((m, i) => {
const isRecommended = m === recommended;
message += ` ${i + 1}. ${m}${isRecommended ? ' (Recommended)' : ''}\n`;
});
}
message += '\nTo fix this, run: oco config set OCO_MODEL=<model-name>\n';
message += 'Or run: oco setup\n';
return message;
}
// Detect insufficient credits/quota errors from various providers
export function isInsufficientCreditsError(error: unknown): boolean {
if (error instanceof InsufficientCreditsError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common patterns for insufficient credits/quota
if (
message.includes('insufficient') ||
message.includes('credit') ||
message.includes('quota') ||
message.includes('balance') ||
message.includes('billing') ||
message.includes('payment') ||
message.includes('exceeded') ||
message.includes('limit reached') ||
message.includes('no remaining')
) {
return true;
}
// Check for 402 Payment Required status
if ('status' in (error as any) && (error as any).status === 402) {
return true;
}
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 402) {
return true;
}
}
}
return false;
}
// Detect rate limit errors (429)
export function isRateLimitError(error: unknown): boolean {
if (error instanceof RateLimitError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common patterns for rate limiting
if (
message.includes('rate limit') ||
message.includes('rate_limit') ||
message.includes('too many requests') ||
message.includes('throttle')
) {
return true;
}
// Check for 429 status
if ('status' in (error as any) && (error as any).status === 429) {
return true;
}
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 429) {
return true;
}
}
}
return false;
}
// Detect service unavailable errors (5xx)
export function isServiceUnavailableError(error: unknown): boolean {
if (error instanceof ServiceUnavailableError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common patterns for service unavailable
if (
message.includes('service unavailable') ||
message.includes('server error') ||
message.includes('internal error') ||
message.includes('temporarily unavailable') ||
message.includes('overloaded')
) {
return true;
}
// Check for 5xx status
const status = (error as any).status || (error as any).response?.status;
if (status && status >= 500 && status < 600) {
return true;
}
}
return false;
}
// User-friendly formatted error structure
export interface FormattedError {
title: string;
message: string;
helpUrl: string | null;
suggestion: string | null;
}
// Format an error into a user-friendly structure
export function formatUserFriendlyError(error: unknown, provider: string): FormattedError {
const billingUrl = PROVIDER_BILLING_URLS[provider] || null;
// Handle our custom error types first
if (error instanceof InsufficientCreditsError) {
return {
title: 'Insufficient Credits',
message: `Your ${provider} account has insufficient credits or quota.`,
helpUrl: billingUrl,
suggestion: 'Add credits to your account to continue using the service.'
};
}
if (error instanceof RateLimitError) {
const retryMsg = error.retryAfter
? `Please wait ${error.retryAfter} seconds before retrying.`
: 'Please wait a moment before retrying.';
return {
title: 'Rate Limit Exceeded',
message: `You've made too many requests to ${provider}.`,
helpUrl: billingUrl,
suggestion: retryMsg
};
}
if (error instanceof ServiceUnavailableError) {
return {
title: 'Service Unavailable',
message: `The ${provider} service is temporarily unavailable.`,
helpUrl: null,
suggestion: 'Please try again in a few moments.'
};
}
if (error instanceof AuthenticationError) {
return {
title: 'Authentication Failed',
message: `Your ${provider} API key is invalid or expired.`,
helpUrl: billingUrl,
suggestion: 'Run `oco setup` to configure a valid API key.'
};
}
if (error instanceof ModelNotFoundError) {
return {
title: 'Model Not Found',
message: `The model '${error.modelName}' is not available for ${provider}.`,
helpUrl: null,
suggestion: 'Run `oco setup` to select a valid model.'
};
}
// Detect error type from raw errors
if (isInsufficientCreditsError(error)) {
return {
title: 'Insufficient Credits',
message: `Your ${provider} account has insufficient credits or quota.`,
helpUrl: billingUrl,
suggestion: 'Add credits to your account to continue using the service.'
};
}
if (isRateLimitError(error)) {
return {
title: 'Rate Limit Exceeded',
message: `You've made too many requests to ${provider}.`,
helpUrl: billingUrl,
suggestion: 'Please wait a moment before retrying.'
};
}
if (isServiceUnavailableError(error)) {
return {
title: 'Service Unavailable',
message: `The ${provider} service is temporarily unavailable.`,
helpUrl: null,
suggestion: 'Please try again in a few moments.'
};
}
if (isApiKeyError(error)) {
return {
title: 'Authentication Failed',
message: `Your ${provider} API key is invalid or expired.`,
helpUrl: billingUrl,
suggestion: 'Run `oco setup` to configure a valid API key.'
};
}
if (isModelNotFoundError(error)) {
const model = (error as any).modelName || (error as any).model || 'unknown';
return {
title: 'Model Not Found',
message: `The model '${model}' is not available for ${provider}.`,
helpUrl: null,
suggestion: 'Run `oco setup` to select a valid model.'
};
}
// Default: generic error
const errorMessage = error instanceof Error ? error.message : String(error);
return {
title: 'Error',
message: errorMessage,
helpUrl: null,
suggestion: 'Run `oco setup` to reconfigure or check your settings.'
};
}
// Print a formatted error as a chalk-styled string
export function printFormattedError(formatted: FormattedError): string {
let output = `\n${chalk.red('✖')} ${chalk.bold.red(formatted.title)}\n`;
output += ` ${formatted.message}\n`;
if (formatted.helpUrl) {
output += `\n ${chalk.cyan('Help:')} ${chalk.underline(formatted.helpUrl)}\n`;
}
if (formatted.suggestion) {
output += `\n ${chalk.yellow('Suggestion:')} ${formatted.suggestion}\n`;
}
return output;
}

src/utils/modelCache.ts (new file, 332 lines)

@@ -0,0 +1,332 @@
import { existsSync, readFileSync, writeFileSync } from 'fs';
import { homedir } from 'os';
import { join as pathJoin } from 'path';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
const MODEL_CACHE_PATH = pathJoin(homedir(), '.opencommit-models.json');
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000; // 7 days
interface ModelCache {
timestamp: number;
models: Record<string, string[]>;
}
function readCache(): ModelCache | null {
try {
if (!existsSync(MODEL_CACHE_PATH)) {
return null;
}
const data = readFileSync(MODEL_CACHE_PATH, 'utf8');
return JSON.parse(data);
} catch {
return null;
}
}
function writeCache(models: Record<string, string[]>): void {
try {
const cache: ModelCache = {
timestamp: Date.now(),
models
};
writeFileSync(MODEL_CACHE_PATH, JSON.stringify(cache, null, 2), 'utf8');
} catch {
// Silently fail if we can't write cache
}
}
function isCacheValid(cache: ModelCache | null): boolean {
if (!cache) return false;
return Date.now() - cache.timestamp < CACHE_TTL_MS;
}
export async function fetchOpenAIModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.openai.com/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openai;
}
const data = await response.json();
const models = data.data
.map((m: { id: string }) => m.id)
.filter(
(id: string) =>
id.startsWith('gpt-') ||
id.startsWith('o1') ||
id.startsWith('o3') ||
id.startsWith('o4')
)
.sort();
return models.length > 0 ? models : MODEL_LIST.openai;
} catch {
return MODEL_LIST.openai;
}
}
export async function fetchOllamaModels(
baseUrl: string = 'http://localhost:11434'
): Promise<string[]> {
try {
const response = await fetch(`${baseUrl}/api/tags`);
if (!response.ok) {
return [];
}
const data = await response.json();
return data.models?.map((m: { name: string }) => m.name) || [];
} catch {
return [];
}
}
export async function fetchAnthropicModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.anthropic.com/v1/models', {
headers: {
'x-api-key': apiKey,
'anthropic-version': '2023-06-01'
}
});
if (!response.ok) {
return MODEL_LIST.anthropic;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.filter((id: string) => id.startsWith('claude-'))
.sort();
return models && models.length > 0 ? models : MODEL_LIST.anthropic;
} catch {
return MODEL_LIST.anthropic;
}
}
export async function fetchMistralModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.mistral.ai/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.mistral;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.mistral;
} catch {
return MODEL_LIST.mistral;
}
}
export async function fetchGroqModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.groq.com/openai/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.groq;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.groq;
} catch {
return MODEL_LIST.groq;
}
}
export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://openrouter.ai/api/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openrouter;
}
const data = await response.json();
// Keep only models that report a context length (a rough proxy for text-capable models; image/audio-only entries are excluded)
const models = data.data
?.filter((m: { id: string; context_length?: number }) =>
m.context_length && m.context_length > 0
)
.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.openrouter;
} catch {
return MODEL_LIST.openrouter;
}
}
export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.deepseek.com/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.deepseek;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
} catch {
return MODEL_LIST.deepseek;
}
}
export async function fetchModelsForProvider(
provider: string,
apiKey?: string,
baseUrl?: string,
forceRefresh: boolean = false
): Promise<string[]> {
const cache = readCache();
// Return cached models if valid (unless force refresh)
if (!forceRefresh && isCacheValid(cache) && cache!.models[provider]) {
return cache!.models[provider];
}
let models: string[] = [];
switch (provider.toLowerCase()) {
case OCO_AI_PROVIDER_ENUM.OPENAI:
if (apiKey) {
models = await fetchOpenAIModels(apiKey);
} else {
models = MODEL_LIST.openai;
}
break;
case OCO_AI_PROVIDER_ENUM.OLLAMA:
models = await fetchOllamaModels(baseUrl);
break;
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
if (apiKey) {
models = await fetchAnthropicModels(apiKey);
} else {
models = MODEL_LIST.anthropic;
}
break;
case OCO_AI_PROVIDER_ENUM.GEMINI:
// Google's API doesn't offer a simple listing of generative models; use the hardcoded list
models = MODEL_LIST.gemini;
break;
case OCO_AI_PROVIDER_ENUM.GROQ:
if (apiKey) {
models = await fetchGroqModels(apiKey);
} else {
models = MODEL_LIST.groq;
}
break;
case OCO_AI_PROVIDER_ENUM.MISTRAL:
if (apiKey) {
models = await fetchMistralModels(apiKey);
} else {
models = MODEL_LIST.mistral;
}
break;
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
if (apiKey) {
models = await fetchDeepSeekModels(apiKey);
} else {
models = MODEL_LIST.deepseek;
}
break;
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
models = MODEL_LIST.aimlapi;
break;
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
if (apiKey) {
models = await fetchOpenRouterModels(apiKey);
} else {
models = MODEL_LIST.openrouter;
}
break;
default:
models = MODEL_LIST.openai;
}
// Update cache
const existingCache = cache?.models || {};
existingCache[provider] = models;
writeCache(existingCache);
return models;
}
export function getModelsForProvider(provider: string): string[] {
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
return MODEL_LIST[providerKey] || MODEL_LIST.openai;
}
export function clearModelCache(): void {
try {
if (existsSync(MODEL_CACHE_PATH)) {
writeFileSync(MODEL_CACHE_PATH, '{}', 'utf8');
}
} catch {
// Silently fail
}
}
export function getCacheInfo(): { timestamp: number | null; providers: string[] } {
const cache = readCache();
if (!cache) {
return { timestamp: null, providers: [] };
}
return {
timestamp: cache.timestamp,
providers: Object.keys(cache.models || {})
};
}
export function getCachedModels(provider: string): string[] | null {
const cache = readCache();
if (!cache || !cache.models[provider]) {
return null;
}
return cache.models[provider];
}
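A minimal usage sketch of the cache flow, not part of the diff; it assumes a file in `src/utils` so the relative import resolves:

```ts
import { fetchModelsForProvider, getCacheInfo, getCachedModels } from './modelCache';

async function showOpenAIModels(): Promise<void> {
  // Hits https://api.openai.com/v1/models when a key is set, otherwise falls
  // back to the hardcoded MODEL_LIST; either way the result is written to the cache.
  const models = await fetchModelsForProvider('openai', process.env.OCO_API_KEY);
  console.log(`Fetched ${models.length} models, e.g. ${models.slice(0, 3).join(', ')}`);

  // Calls within the 7-day TTL are answered from ~/.opencommit-models.json.
  const cached = getCachedModels('openai');
  const info = getCacheInfo();
  console.log(`Cached providers: ${info.providers.join(', ')}`);
  console.log(
    `Cache written at: ${info.timestamp ? new Date(info.timestamp).toISOString() : 'never'}`
  );
  console.log(`Cached count matches: ${cached?.length === models.length}`);
}

showOpenAIModels();
```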