Compare commits: dev...v3.2.11 (2 commits)

Author       SHA1        Message                                                        Date
di-sukharev  fdd4d89bba  3.2.11                                                         2026-01-17 23:06:17 +03:00
di-sukharev  d70797b864  feat: add interactive setup wizard and model error handling    2026-01-17 23:04:43 +03:00

    Add comprehensive setup command with provider selection, API key
    configuration, and model selection. Include error recovery for
    model-not-found scenarios with suggested alternatives and automatic
    retry functionality. Update Anthropic model list with latest versions
    and add provider metadata for better user experience.
15 changed files with 7304 additions and 4503 deletions
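
In outline, the commit gates every plain `oco` run on configuration being present. A condensed sketch of the src/cli.ts change shown below (not additional code from the commit), using the exports of the new src/commands/setup.ts:

    import { isFirstRun, runSetup, promptForMissingApiKey } from './commands/setup';

    async function ensureConfigured(): Promise<void> {
      // No ~/.opencommit yet, or no key/model for the configured provider:
      // run the interactive wizard; cancelling it aborts the run.
      if (isFirstRun()) {
        const setupComplete = await runSetup();
        if (!setupComplete) process.exit(1);
      }

      // Providers that need a key but have none stored get a one-off prompt.
      const hasApiKey = await promptForMissingApiKey();
      if (!hasApiKey) process.exit(1);
    }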

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

package-lock.json (generated, 4 changed lines)

@@ -1,12 +1,12 @@
 {
   "name": "opencommit",
-  "version": "3.2.10",
+  "version": "3.2.11",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "opencommit",
-      "version": "3.2.10",
+      "version": "3.2.11",
       "license": "MIT",
       "dependencies": {
         "@actions/core": "^1.10.0",

package.json

@@ -1,6 +1,6 @@
 {
   "name": "opencommit",
-  "version": "3.2.10",
+  "version": "3.2.11",
   "description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
   "keywords": [
     "git",

src/cli.ts

@@ -8,6 +8,12 @@ import { commitlintConfigCommand } from './commands/commitlint';
 import { configCommand } from './commands/config';
 import { hookCommand, isHookCalled } from './commands/githook.js';
 import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
+import {
+  setupCommand,
+  isFirstRun,
+  runSetup,
+  promptForMissingApiKey
+} from './commands/setup';
 import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
 import { runMigrations } from './migrations/_run.js';
@@ -17,7 +23,7 @@ cli(
   {
     version: packageJSON.version,
     name: 'opencommit',
-    commands: [configCommand, hookCommand, commitlintConfigCommand],
+    commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand],
     flags: {
       fgm: {
         type: Boolean,
@@ -47,6 +53,20 @@ cli(
   if (await isHookCalled()) {
     prepareCommitMessageHook();
   } else {
+    // Check for first run and trigger setup wizard
+    if (isFirstRun()) {
+      const setupComplete = await runSetup();
+      if (!setupComplete) {
+        process.exit(1);
+      }
+    }
+
+    // Check for missing API key and prompt if needed
+    const hasApiKey = await promptForMissingApiKey();
+    if (!hasApiKey) {
+      process.exit(1);
+    }
+
     commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
   }
 },

src/commands/ENUMS.ts

@@ -1,5 +1,6 @@
 export enum COMMANDS {
   config = 'config',
   hook = 'hook',
-  commitlint = 'commitlint'
+  commitlint = 'commitlint',
+  setup = 'setup'
 }

src/commands/config.ts

@@ -68,10 +68,11 @@ export const MODEL_LIST = {
   ],
   anthropic: [
-    'claude-3-5-sonnet-20240620',
-    'claude-3-opus-20240229',
-    'claude-3-sonnet-20240229',
-    'claude-3-haiku-20240307'
+    'claude-sonnet-4-20250514',
+    'claude-opus-4-20250514',
+    'claude-3-7-sonnet-20250219',
+    'claude-3-5-sonnet-20241022',
+    'claude-3-5-haiku-20241022'
   ],
   gemini: [
@@ -846,6 +847,33 @@ export enum OCO_AI_PROVIDER_ENUM {
   OPENROUTER = 'openrouter'
 }

+export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
+  [OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/api-keys',
+  [OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/keys',
+  [OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/apikey',
+  [OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/keys',
+  [OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/api-keys/',
+  [OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/api_keys',
+  [OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/keys',
+  [OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/keys',
+  [OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/',
+  [OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
+  [OCO_AI_PROVIDER_ENUM.MLX]: null,
+  [OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
+  [OCO_AI_PROVIDER_ENUM.TEST]: null
+};
+
+export const RECOMMENDED_MODELS: Record<string, string> = {
+  [OCO_AI_PROVIDER_ENUM.OPENAI]: 'gpt-4o-mini',
+  [OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'claude-sonnet-4-20250514',
+  [OCO_AI_PROVIDER_ENUM.GEMINI]: 'gemini-1.5-flash',
+  [OCO_AI_PROVIDER_ENUM.GROQ]: 'llama3-70b-8192',
+  [OCO_AI_PROVIDER_ENUM.MISTRAL]: 'mistral-small-latest',
+  [OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'deepseek-chat',
+  [OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'openai/gpt-4o-mini',
+  [OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini'
+};
+
 export type ConfigType = {
   [CONFIG_KEYS.OCO_API_KEY]?: string;
   [CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;

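Both new maps are plain provider-keyed lookups; a null URL marks providers that need no API key. A minimal usage sketch (mirroring how src/commands/setup.ts below consumes them):

    const provider = OCO_AI_PROVIDER_ENUM.ANTHROPIC;
    const keyUrl = PROVIDER_API_KEY_URLS[provider]; // 'https://console.anthropic.com/settings/keys'
    const fallbackModel = RECOMMENDED_MODELS[provider]; // 'claude-sonnet-4-20250514'

    if (keyUrl === null) {
      // Local/keyless providers (Ollama, MLX, Flowise, test): skip the key prompt
    }
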
src/commands/setup.ts (new file, 450 lines)

@@ -0,0 +1,450 @@
import { intro, outro, select, text, isCancel, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
  CONFIG_KEYS,
  MODEL_LIST,
  OCO_AI_PROVIDER_ENUM,
  getConfig,
  setGlobalConfig,
  getGlobalConfig,
  getIsGlobalConfigFileExist,
  DEFAULT_CONFIG,
  PROVIDER_API_KEY_URLS,
  RECOMMENDED_MODELS
} from './config';
import {
  fetchModelsForProvider,
  fetchOllamaModels
} from '../utils/modelCache';

const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
  [OCO_AI_PROVIDER_ENUM.OPENAI]: 'OpenAI (GPT-4o, GPT-4)',
  [OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'Anthropic (Claude Sonnet, Opus)',
  [OCO_AI_PROVIDER_ENUM.OLLAMA]: 'Ollama (Free, runs locally)',
  [OCO_AI_PROVIDER_ENUM.GEMINI]: 'Google Gemini',
  [OCO_AI_PROVIDER_ENUM.GROQ]: 'Groq (Fast inference, free tier)',
  [OCO_AI_PROVIDER_ENUM.MISTRAL]: 'Mistral AI',
  [OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'DeepSeek',
  [OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'OpenRouter (Multiple providers)',
  [OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'AI/ML API',
  [OCO_AI_PROVIDER_ENUM.AZURE]: 'Azure OpenAI',
  [OCO_AI_PROVIDER_ENUM.MLX]: 'MLX (Apple Silicon, local)'
};

const PRIMARY_PROVIDERS = [
  OCO_AI_PROVIDER_ENUM.OPENAI,
  OCO_AI_PROVIDER_ENUM.ANTHROPIC,
  OCO_AI_PROVIDER_ENUM.OLLAMA
];

const OTHER_PROVIDERS = [
  OCO_AI_PROVIDER_ENUM.GEMINI,
  OCO_AI_PROVIDER_ENUM.GROQ,
  OCO_AI_PROVIDER_ENUM.MISTRAL,
  OCO_AI_PROVIDER_ENUM.DEEPSEEK,
  OCO_AI_PROVIDER_ENUM.OPENROUTER,
  OCO_AI_PROVIDER_ENUM.AIMLAPI,
  OCO_AI_PROVIDER_ENUM.AZURE,
  OCO_AI_PROVIDER_ENUM.MLX
];

const NO_API_KEY_PROVIDERS = [
  OCO_AI_PROVIDER_ENUM.OLLAMA,
  OCO_AI_PROVIDER_ENUM.MLX
];

async function selectProvider(): Promise<string | symbol> {
  const primaryOptions = PRIMARY_PROVIDERS.map((provider) => ({
    value: provider,
    label: PROVIDER_DISPLAY_NAMES[provider] || provider
  }));

  primaryOptions.push({
    value: 'other',
    label: 'Other providers...'
  });

  const selection = await select({
    message: 'Select your AI provider:',
    options: primaryOptions
  });

  if (isCancel(selection)) return selection;

  if (selection === 'other') {
    const otherOptions = OTHER_PROVIDERS.map((provider) => ({
      value: provider,
      label: PROVIDER_DISPLAY_NAMES[provider] || provider
    }));

    return await select({
      message: 'Select provider:',
      options: otherOptions
    });
  }

  return selection;
}

async function getApiKey(provider: string): Promise<string | symbol> {
  const url = PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];

  let message = `Enter your ${provider} API key:`;
  if (url) {
    message = `Enter your API key:\n${chalk.dim(` Get your key at: ${url}`)}`;
  }

  return await text({
    message,
    placeholder: 'sk-...',
    validate: (value) => {
      if (!value || value.trim().length === 0) {
        return 'API key is required';
      }
      return undefined;
    }
  });
}

async function selectModel(
  provider: string,
  apiKey?: string
): Promise<string | symbol> {
  const loadingSpinner = spinner();
  loadingSpinner.start('Fetching available models...');

  let models: string[] = [];
  try {
    models = await fetchModelsForProvider(provider, apiKey);
  } catch {
    // Fall back to hardcoded list
    const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
    models = MODEL_LIST[providerKey] || [];
  }

  loadingSpinner.stop('Models loaded');

  if (models.length === 0) {
    // For Ollama/MLX, prompt for manual entry
    if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
      return await text({
        message: 'Enter model name (e.g., llama3:8b, mistral):',
        placeholder: 'llama3:8b',
        validate: (value) => {
          if (!value || value.trim().length === 0) {
            return 'Model name is required';
          }
          return undefined;
        }
      });
    }

    // Use default from config
    const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
    return MODEL_LIST[providerKey]?.[0] || 'gpt-4o-mini';
  }

  // Get recommended model for this provider
  const recommended = RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];

  // Build options with recommended first
  const options: Array<{ value: string; label: string }> = [];

  if (recommended && models.includes(recommended)) {
    options.push({
      value: recommended,
      label: `${recommended} (Recommended)`
    });
  }

  // Add other models (first 10, excluding recommended)
  const otherModels = models
    .filter((m) => m !== recommended)
    .slice(0, 10);

  otherModels.forEach((model) => {
    options.push({ value: model, label: model });
  });

  // Add option to see all or enter custom
  if (models.length > 11) {
    options.push({ value: '__show_all__', label: 'Show all models...' });
  }
  options.push({ value: '__custom__', label: 'Enter custom model...' });

  const selection = await select({
    message: 'Select a model:',
    options
  });

  if (isCancel(selection)) return selection;

  if (selection === '__show_all__') {
    const allOptions = models.map((model) => ({
      value: model,
      label: model === recommended ? `${model} (Recommended)` : model
    }));

    return await select({
      message: 'Select a model:',
      options: allOptions
    });
  }

  if (selection === '__custom__') {
    return await text({
      message: 'Enter model name:',
      validate: (value) => {
        if (!value || value.trim().length === 0) {
          return 'Model name is required';
        }
        return undefined;
      }
    });
  }

  return selection;
}

async function setupOllama(): Promise<{
  provider: string;
  model: string;
  apiUrl: string;
} | null> {
  console.log(chalk.cyan('\n Ollama - Free Local AI\n'));
  console.log(chalk.dim(' Setup steps:'));
  console.log(chalk.dim(' 1. Install: https://ollama.ai/download'));
  console.log(chalk.dim(' 2. Pull a model: ollama pull llama3:8b'));
  console.log(chalk.dim(' 3. Start server: ollama serve\n'));

  // Try to fetch available models
  const loadingSpinner = spinner();
  loadingSpinner.start('Checking for local Ollama installation...');

  const defaultUrl = 'http://localhost:11434';
  let ollamaModels: string[] = [];

  try {
    ollamaModels = await fetchOllamaModels(defaultUrl);
    if (ollamaModels.length > 0) {
      loadingSpinner.stop(
        `${chalk.green('✔')} Found ${ollamaModels.length} local model(s)`
      );
    } else {
      loadingSpinner.stop(
        chalk.yellow(
          'Ollama is running but no models found. Pull a model first: ollama pull llama3:8b'
        )
      );
    }
  } catch {
    loadingSpinner.stop(
      chalk.yellow(
        'Could not connect to Ollama. Make sure it is running: ollama serve'
      )
    );
  }

  // Model selection
  let model: string | symbol;

  if (ollamaModels.length > 0) {
    model = await select({
      message: 'Select a model:',
      options: [
        ...ollamaModels.map((m) => ({ value: m, label: m })),
        { value: '__custom__', label: 'Enter custom model name...' }
      ]
    });

    if (isCancel(model)) return null;

    if (model === '__custom__') {
      model = await text({
        message: 'Enter model name (e.g., llama3:8b, mistral):',
        placeholder: 'llama3:8b'
      });
    }
  } else {
    model = await text({
      message: 'Enter model name (e.g., llama3:8b, mistral):',
      placeholder: 'llama3:8b',
      validate: (value) => {
        if (!value || value.trim().length === 0) {
          return 'Model name is required';
        }
        return undefined;
      }
    });
  }

  if (isCancel(model)) return null;

  // API URL (optional)
  const apiUrl = await text({
    message: 'Ollama URL (press Enter for default):',
    placeholder: defaultUrl,
    defaultValue: defaultUrl
  });

  if (isCancel(apiUrl)) return null;

  return {
    provider: OCO_AI_PROVIDER_ENUM.OLLAMA,
    model: model as string,
    apiUrl: (apiUrl as string) || defaultUrl
  };
}

export async function runSetup(): Promise<boolean> {
  intro(chalk.bgCyan(' Welcome to OpenCommit! '));

  // Select provider
  const provider = await selectProvider();
  if (isCancel(provider)) {
    outro('Setup cancelled');
    return false;
  }

  let config: Partial<Record<string, any>> = {};

  // Handle Ollama specially
  if (provider === OCO_AI_PROVIDER_ENUM.OLLAMA) {
    const ollamaConfig = await setupOllama();
    if (!ollamaConfig) {
      outro('Setup cancelled');
      return false;
    }
    config = {
      OCO_AI_PROVIDER: ollamaConfig.provider,
      OCO_MODEL: ollamaConfig.model,
      OCO_API_URL: ollamaConfig.apiUrl,
      OCO_API_KEY: 'ollama' // Placeholder
    };
  } else if (provider === OCO_AI_PROVIDER_ENUM.MLX) {
    // MLX setup
    console.log(chalk.cyan('\n MLX - Apple Silicon Local AI\n'));
    console.log(chalk.dim(' MLX runs locally on Apple Silicon Macs.'));
    console.log(chalk.dim(' No API key required.\n'));

    const model = await text({
      message: 'Enter model name:',
      placeholder: 'mlx-community/Llama-3-8B-Instruct-4bit'
    });

    if (isCancel(model)) {
      outro('Setup cancelled');
      return false;
    }

    config = {
      OCO_AI_PROVIDER: OCO_AI_PROVIDER_ENUM.MLX,
      OCO_MODEL: model,
      OCO_API_KEY: 'mlx' // Placeholder
    };
  } else {
    // Standard provider flow: API key then model
    const apiKey = await getApiKey(provider as string);
    if (isCancel(apiKey)) {
      outro('Setup cancelled');
      return false;
    }

    const model = await selectModel(provider as string, apiKey as string);
    if (isCancel(model)) {
      outro('Setup cancelled');
      return false;
    }

    config = {
      OCO_AI_PROVIDER: provider,
      OCO_API_KEY: apiKey,
      OCO_MODEL: model
    };
  }

  // Save configuration
  const existingConfig = getIsGlobalConfigFileExist()
    ? getGlobalConfig()
    : DEFAULT_CONFIG;

  const newConfig = {
    ...existingConfig,
    ...config
  };

  setGlobalConfig(newConfig as any);

  outro(
    `${chalk.green('✔')} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan('oco')} to generate commit messages!`
  );

  return true;
}

export function isFirstRun(): boolean {
  if (!getIsGlobalConfigFileExist()) {
    return true;
  }

  const config = getConfig();

  // Check if API key is missing for providers that need it
  const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;

  if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
    // For Ollama/MLX, check if model is set
    return !config.OCO_MODEL;
  }

  // For other providers, check if API key is set
  return !config.OCO_API_KEY;
}

export async function promptForMissingApiKey(): Promise<boolean> {
  const config = getConfig();
  const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;

  if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
    return true; // No API key needed
  }

  if (config.OCO_API_KEY) {
    return true; // Already has key
  }

  console.log(
    chalk.yellow(
      `\nAPI key missing for ${provider}. Let's set it up.\n`
    )
  );

  const apiKey = await getApiKey(provider);
  if (isCancel(apiKey)) {
    return false;
  }

  const existingConfig = getGlobalConfig();
  setGlobalConfig({
    ...existingConfig,
    OCO_API_KEY: apiKey as string
  } as any);

  console.log(chalk.green('✔') + ' API key saved\n');
  return true;
}

export const setupCommand = command(
  {
    name: COMMANDS.setup,
    help: {
      description: 'Interactive setup wizard for OpenCommit'
    }
  },
  async () => {
    await runSetup();
  }
);

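Pieced together from the prompt strings above, a cloud-provider run of `oco setup` goes roughly: intro banner, provider select (OpenAI / Anthropic / Ollama / Other providers...), API key prompt with the matching PROVIDER_API_KEY_URLS link, model select (recommended model first, up to ten more, then 'Show all models...' and 'Enter custom model...'), and the closing '✔ Configuration saved to ~/.opencommit'. Ollama and MLX skip the key prompt and take a model name (and, for Ollama, a server URL) instead.
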
src/engine/anthropic.ts

@@ -8,6 +8,7 @@ import axios from 'axios';
 import chalk from 'chalk';
 import { OpenAI } from 'openai';
 import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+import { ModelNotFoundError } from '../utils/errors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';
@@ -59,6 +60,20 @@ export class AnthropicEngine implements AiEngine {
       return removeContentTags(content, 'think');
     } catch (error) {
       const err = error as Error;
+
+      // Check for model not found errors
+      if (err.message?.toLowerCase().includes('model') &&
+          (err.message?.toLowerCase().includes('not found') ||
+           err.message?.toLowerCase().includes('does not exist') ||
+           err.message?.toLowerCase().includes('invalid'))) {
+        throw new ModelNotFoundError(this.config.model, 'anthropic', 404);
+      }
+
+      // Check for 404 errors
+      if ('status' in (error as any) && (error as any).status === 404) {
+        throw new ModelNotFoundError(this.config.model, 'anthropic', 404);
+      }
+
       outro(`${chalk.red('✖')} ${err?.message || err}`);
@@ -73,6 +88,11 @@
         );
       }

+      // Check axios 404 errors
+      if (axios.isAxiosError(error) && error.response?.status === 404) {
+        throw new ModelNotFoundError(this.config.model, 'anthropic', 404);
+      }
+
       throw err;
     }
   };

src/engine/gemini.ts

@@ -7,6 +7,7 @@ import {
 } from '@google/generative-ai';
 import axios from 'axios';
 import { OpenAI } from 'openai';
+import { ModelNotFoundError } from '../utils/errors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { AiEngine, AiEngineConfig } from './Engine';
@@ -76,6 +77,15 @@ export class GeminiEngine implements AiEngine {
       return removeContentTags(content, 'think');
     } catch (error) {
       const err = error as Error;
+
+      // Check for model not found errors
+      if (err.message?.toLowerCase().includes('model') &&
+          (err.message?.toLowerCase().includes('not found') ||
+           err.message?.toLowerCase().includes('does not exist') ||
+           err.message?.toLowerCase().includes('invalid'))) {
+        throw new ModelNotFoundError(this.config.model, 'gemini', 404);
+      }
+
       if (
         axios.isAxiosError<{ error?: { message: string } }>(error) &&
         error.response?.status === 401
@@ -84,6 +94,11 @@
         if (geminiError) throw new Error(geminiError?.message);
       }

+      // Check axios 404 errors
+      if (axios.isAxiosError(error) && error.response?.status === 404) {
+        throw new ModelNotFoundError(this.config.model, 'gemini', 404);
+      }
+
       throw err;
     }
   }

src/engine/ollama.ts

@@ -1,5 +1,6 @@
 import axios, { AxiosInstance } from 'axios';
 import { OpenAI } from 'openai';
+import { ModelNotFoundError } from '../utils/errors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { AiEngine, AiEngineConfig } from './Engine';
@@ -46,6 +47,20 @@ export class OllamaEngine implements AiEngine {
       return removeContentTags(content, 'think');
     } catch (err: any) {
       const message = err.response?.data?.error ?? err.message;
+
+      // Check for model not found errors
+      if (message?.toLowerCase().includes('model') &&
+          (message?.toLowerCase().includes('not found') ||
+           message?.toLowerCase().includes('does not exist') ||
+           message?.toLowerCase().includes('pull'))) {
+        throw new ModelNotFoundError(this.config.model, 'ollama', 404);
+      }
+
+      // Check for 404 status
+      if (err.response?.status === 404) {
+        throw new ModelNotFoundError(this.config.model, 'ollama', 404);
+      }
+
       throw new Error(`Ollama provider error: ${message}`);
     }
   }

src/engine/openAi.ts

@@ -2,6 +2,7 @@ import axios from 'axios';
 import { OpenAI } from 'openai';
 import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
 import { parseCustomHeaders } from '../utils/engine';
+import { ModelNotFoundError } from '../utils/errors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';
@@ -62,6 +63,20 @@ export class OpenAiEngine implements AiEngine {
       return removeContentTags(content, 'think');
     } catch (error) {
       const err = error as Error;
+
+      // Check for model not found errors
+      if (err.message?.toLowerCase().includes('model') &&
+          (err.message?.toLowerCase().includes('not found') ||
+           err.message?.toLowerCase().includes('does not exist') ||
+           err.message?.toLowerCase().includes('invalid'))) {
+        throw new ModelNotFoundError(this.config.model, 'openai', 404);
+      }
+
+      // Check for 404 errors from API
+      if ('status' in (error as any) && (error as any).status === 404) {
+        throw new ModelNotFoundError(this.config.model, 'openai', 404);
+      }
+
       if (
         axios.isAxiosError<{ error?: { message: string } }>(error) &&
         error.response?.status === 401
@@ -71,6 +86,11 @@
         if (openAiError) throw new Error(openAiError.message);
       }

+      // Check axios 404 errors
+      if (axios.isAxiosError(error) && error.response?.status === 404) {
+        throw new ModelNotFoundError(this.config.model, 'openai', 404);
+      }
+
       throw err;
     }
   };

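All four engines apply the same two-step detection before their existing error handling: sniff the message for model-related phrasing, then fall back to a 404 status check. The diff inlines this per engine; a hedged sketch of the shared shape, assuming the ModelNotFoundError class from src/utils/errors.ts below:

    function toModelNotFoundError(
      error: unknown,
      model: string,
      provider: string
    ): ModelNotFoundError | null {
      const msg = ((error as Error)?.message ?? '').toLowerCase();
      // Same phrase list the engines check, minus Ollama's provider-specific 'pull'
      const mentionsMissingModel =
        msg.includes('model') &&
        (msg.includes('not found') ||
          msg.includes('does not exist') ||
          msg.includes('invalid'));
      // Covers both SDK-style `status` and axios-style `response.status`
      const is404 =
        (error as any)?.status === 404 || (error as any)?.response?.status === 404;
      return mentionsMissingModel || is404
        ? new ModelNotFoundError(model, provider, 404)
        : null;
    }
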
src/generateCommitMessageFromGitDiff.ts

@@ -1,7 +1,21 @@
+import { select, confirm, isCancel } from '@clack/prompts';
+import chalk from 'chalk';
 import { OpenAI } from 'openai';
-import { DEFAULT_TOKEN_LIMITS, getConfig } from './commands/config';
+import {
+  DEFAULT_TOKEN_LIMITS,
+  getConfig,
+  setGlobalConfig,
+  getGlobalConfig,
+  MODEL_LIST,
+  RECOMMENDED_MODELS
+} from './commands/config';
 import { getMainCommitPrompt } from './prompts';
 import { getEngine } from './utils/engine';
+import {
+  isModelNotFoundError,
+  getSuggestedModels,
+  ModelNotFoundError
+} from './utils/errors';
 import { mergeDiffs } from './utils/mergeDiffs';
 import { tokenCount } from './utils/tokenCount';
@@ -36,13 +50,106 @@ export enum GenerateCommitMessageErrorEnum {
   outputTokensTooHigh = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT} tokens.`
 }

+async function handleModelNotFoundError(
+  error: Error,
+  provider: string,
+  currentModel: string
+): Promise<string | null> {
+  console.log(
+    chalk.red(`\n✖ Model '${currentModel}' not found\n`)
+  );
+
+  const suggestedModels = getSuggestedModels(provider, currentModel);
+  const recommended =
+    RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
+
+  if (suggestedModels.length === 0) {
+    console.log(
+      chalk.yellow(
+        `No alternative models available. Run 'oco setup' to configure a different model.`
+      )
+    );
+    return null;
+  }
+
+  const options: Array<{ value: string; label: string }> = [];
+
+  // Add recommended first if available
+  if (recommended && suggestedModels.includes(recommended)) {
+    options.push({
+      value: recommended,
+      label: `${recommended} (Recommended)`
+    });
+  }
+
+  // Add other suggestions
+  suggestedModels
+    .filter((m) => m !== recommended)
+    .forEach((model) => {
+      options.push({ value: model, label: model });
+    });
+
+  options.push({ value: '__custom__', label: 'Enter custom model...' });
+
+  const selection = await select({
+    message: 'Select an alternative model:',
+    options
+  });
+
+  if (isCancel(selection)) {
+    return null;
+  }
+
+  let newModel: string;
+
+  if (selection === '__custom__') {
+    const { text } = await import('@clack/prompts');
+    const customModel = await text({
+      message: 'Enter model name:',
+      validate: (value) => {
+        if (!value || value.trim().length === 0) {
+          return 'Model name is required';
+        }
+        return undefined;
+      }
+    });
+    if (isCancel(customModel)) {
+      return null;
+    }
+    newModel = customModel as string;
+  } else {
+    newModel = selection as string;
+  }
+
+  // Ask if user wants to save as default
+  const saveAsDefault = await confirm({
+    message: 'Save as default model?'
+  });
+
+  if (!isCancel(saveAsDefault) && saveAsDefault) {
+    const existingConfig = getGlobalConfig();
+    setGlobalConfig({
+      ...existingConfig,
+      OCO_MODEL: newModel
+    } as any);
+    console.log(chalk.green('✔') + ' Model saved as default\n');
+  }
+
+  return newModel;
+}
+
 const ADJUSTMENT_FACTOR = 20;
 export const generateCommitMessageByDiff = async (
   diff: string,
   fullGitMojiSpec: boolean = false,
-  context: string = ''
+  context: string = '',
+  retryWithModel?: string
 ): Promise<string> => {
+  const currentConfig = getConfig();
+  const provider = currentConfig.OCO_AI_PROVIDER || 'openai';
+  const currentModel = retryWithModel || currentConfig.OCO_MODEL;
+
   try {
     const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(
       fullGitMojiSpec,
@@ -89,6 +196,32 @@ export const generateCommitMessageByDiff = async (
     return commitMessage;
   } catch (error) {
+    // Handle model-not-found errors with interactive recovery
+    if (isModelNotFoundError(error)) {
+      const newModel = await handleModelNotFoundError(
+        error as Error,
+        provider,
+        currentModel
+      );
+
+      if (newModel) {
+        console.log(chalk.cyan(`Retrying with ${newModel}...\n`));
+
+        // Retry with the new model by updating config temporarily
+        const existingConfig = getGlobalConfig();
+        setGlobalConfig({
+          ...existingConfig,
+          OCO_MODEL: newModel
+        } as any);
+
+        return generateCommitMessageByDiff(
+          diff,
+          fullGitMojiSpec,
+          context,
+          newModel
+        );
+      }
+    }
+
     throw error;
   }
 };

src/utils/errors.ts (new file, 166 lines)

@@ -0,0 +1,166 @@
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';

export class ModelNotFoundError extends Error {
  public readonly modelName: string;
  public readonly provider: string;
  public readonly statusCode: number;

  constructor(modelName: string, provider: string, statusCode: number = 404) {
    super(`Model '${modelName}' not found for provider '${provider}'`);
    this.name = 'ModelNotFoundError';
    this.modelName = modelName;
    this.provider = provider;
    this.statusCode = statusCode;
  }
}

export class ApiKeyMissingError extends Error {
  public readonly provider: string;

  constructor(provider: string) {
    super(`API key is missing for provider '${provider}'`);
    this.name = 'ApiKeyMissingError';
    this.provider = provider;
  }
}

export function isModelNotFoundError(error: unknown): boolean {
  if (error instanceof ModelNotFoundError) {
    return true;
  }

  if (error instanceof Error) {
    const message = error.message.toLowerCase();

    // OpenAI error patterns
    if (
      message.includes('model') &&
      (message.includes('not found') ||
        message.includes('does not exist') ||
        message.includes('invalid model'))
    ) {
      return true;
    }

    // Anthropic error patterns
    if (
      message.includes('model') &&
      (message.includes('not found') || message.includes('invalid'))
    ) {
      return true;
    }

    // Check for 404 status in axios/fetch errors
    if (
      'status' in (error as any) &&
      (error as any).status === 404 &&
      message.includes('model')
    ) {
      return true;
    }

    // Check for response status
    if ('response' in (error as any)) {
      const response = (error as any).response;
      if (response?.status === 404) {
        return true;
      }
    }
  }

  return false;
}

export function isApiKeyError(error: unknown): boolean {
  if (error instanceof ApiKeyMissingError) {
    return true;
  }

  if (error instanceof Error) {
    const message = error.message.toLowerCase();

    // Common API key error patterns
    if (
      message.includes('api key') ||
      message.includes('apikey') ||
      message.includes('authentication') ||
      message.includes('unauthorized') ||
      message.includes('invalid_api_key') ||
      message.includes('incorrect api key')
    ) {
      return true;
    }

    // Check for 401 status
    if ('response' in (error as any)) {
      const response = (error as any).response;
      if (response?.status === 401) {
        return true;
      }
    }
  }

  return false;
}

export function getSuggestedModels(
  provider: string,
  failedModel: string
): string[] {
  const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
  const models = MODEL_LIST[providerKey];

  if (!models || !Array.isArray(models)) {
    return [];
  }

  // Return first 5 models as suggestions, excluding the failed one
  return models.filter((m) => m !== failedModel).slice(0, 5);
}

export function getRecommendedModel(provider: string): string | null {
  switch (provider.toLowerCase()) {
    case OCO_AI_PROVIDER_ENUM.OPENAI:
      return 'gpt-4o-mini';
    case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
      return 'claude-sonnet-4-20250514';
    case OCO_AI_PROVIDER_ENUM.GEMINI:
      return 'gemini-1.5-flash';
    case OCO_AI_PROVIDER_ENUM.GROQ:
      return 'llama3-70b-8192';
    case OCO_AI_PROVIDER_ENUM.MISTRAL:
      return 'mistral-small-latest';
    case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
      return 'deepseek-chat';
    case OCO_AI_PROVIDER_ENUM.OPENROUTER:
      return 'openai/gpt-4o-mini';
    case OCO_AI_PROVIDER_ENUM.AIMLAPI:
      return 'gpt-4o-mini';
    default:
      return null;
  }
}

export function formatErrorWithRecovery(
  error: Error,
  provider: string,
  model: string
): string {
  const suggestions = getSuggestedModels(provider, model);
  const recommended = getRecommendedModel(provider);

  let message = `\n${error.message}\n`;

  if (suggestions.length > 0) {
    message += '\nSuggested alternatives:\n';
    suggestions.forEach((m, i) => {
      const isRecommended = m === recommended;
      message += ` ${i + 1}. ${m}${isRecommended ? ' (Recommended)' : ''}\n`;
    });
  }

  message += '\nTo fix this, run: oco config set OCO_MODEL=<model-name>\n';
  message += 'Or run: oco setup\n';

  return message;
}

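A usage sketch of the new helpers (hypothetical call site and model name; the import path for AiEngine is an assumption, and the commit itself wires the interactive path through generateCommitMessageByDiff instead):

    import { AiEngine } from './engine/Engine';
    import { isModelNotFoundError, formatErrorWithRecovery } from './utils/errors';

    async function generateWithRecovery(engine: AiEngine, messages: any[]) {
      try {
        return await engine.generateCommitMessage(messages as any);
      } catch (error) {
        if (isModelNotFoundError(error)) {
          // Prints the failure plus up to five alternatives and the fix commands
          console.error(
            formatErrorWithRecovery(error as Error, 'openai', 'gpt-4o-mini-typo')
          );
          process.exit(1);
        }
        throw error;
      }
    }
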
src/utils/modelCache.ts (new file, 170 lines)

@@ -0,0 +1,170 @@
import { existsSync, readFileSync, writeFileSync } from 'fs';
import { homedir } from 'os';
import { join as pathJoin } from 'path';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';

const MODEL_CACHE_PATH = pathJoin(homedir(), '.opencommit-models.json');
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000; // 7 days

interface ModelCache {
  timestamp: number;
  models: Record<string, string[]>;
}

function readCache(): ModelCache | null {
  try {
    if (!existsSync(MODEL_CACHE_PATH)) {
      return null;
    }
    const data = readFileSync(MODEL_CACHE_PATH, 'utf8');
    return JSON.parse(data);
  } catch {
    return null;
  }
}

function writeCache(models: Record<string, string[]>): void {
  try {
    const cache: ModelCache = {
      timestamp: Date.now(),
      models
    };
    writeFileSync(MODEL_CACHE_PATH, JSON.stringify(cache, null, 2), 'utf8');
  } catch {
    // Silently fail if we can't write cache
  }
}

function isCacheValid(cache: ModelCache | null): boolean {
  if (!cache) return false;
  return Date.now() - cache.timestamp < CACHE_TTL_MS;
}

export async function fetchOpenAIModels(apiKey: string): Promise<string[]> {
  try {
    const response = await fetch('https://api.openai.com/v1/models', {
      headers: {
        Authorization: `Bearer ${apiKey}`
      }
    });

    if (!response.ok) {
      return MODEL_LIST.openai;
    }

    const data = await response.json();
    const models = data.data
      .map((m: { id: string }) => m.id)
      .filter(
        (id: string) =>
          id.startsWith('gpt-') ||
          id.startsWith('o1') ||
          id.startsWith('o3') ||
          id.startsWith('o4')
      )
      .sort();

    return models.length > 0 ? models : MODEL_LIST.openai;
  } catch {
    return MODEL_LIST.openai;
  }
}

export async function fetchOllamaModels(
  baseUrl: string = 'http://localhost:11434'
): Promise<string[]> {
  try {
    const response = await fetch(`${baseUrl}/api/tags`);
    if (!response.ok) {
      return [];
    }
    const data = await response.json();
    return data.models?.map((m: { name: string }) => m.name) || [];
  } catch {
    return [];
  }
}

export async function fetchModelsForProvider(
  provider: string,
  apiKey?: string,
  baseUrl?: string
): Promise<string[]> {
  const cache = readCache();

  // Return cached models if valid
  if (isCacheValid(cache) && cache!.models[provider]) {
    return cache!.models[provider];
  }

  let models: string[] = [];

  switch (provider.toLowerCase()) {
    case OCO_AI_PROVIDER_ENUM.OPENAI:
      if (apiKey) {
        models = await fetchOpenAIModels(apiKey);
      } else {
        models = MODEL_LIST.openai;
      }
      break;
    case OCO_AI_PROVIDER_ENUM.OLLAMA:
      models = await fetchOllamaModels(baseUrl);
      break;
    case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
      models = MODEL_LIST.anthropic;
      break;
    case OCO_AI_PROVIDER_ENUM.GEMINI:
      models = MODEL_LIST.gemini;
      break;
    case OCO_AI_PROVIDER_ENUM.GROQ:
      models = MODEL_LIST.groq;
      break;
    case OCO_AI_PROVIDER_ENUM.MISTRAL:
      models = MODEL_LIST.mistral;
      break;
    case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
      models = MODEL_LIST.deepseek;
      break;
    case OCO_AI_PROVIDER_ENUM.AIMLAPI:
      models = MODEL_LIST.aimlapi;
      break;
    case OCO_AI_PROVIDER_ENUM.OPENROUTER:
      models = MODEL_LIST.openrouter;
      break;
    default:
      models = MODEL_LIST.openai;
  }

  // Update cache
  const existingCache = cache?.models || {};
  existingCache[provider] = models;
  writeCache(existingCache);

  return models;
}

export function getModelsForProvider(provider: string): string[] {
  const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
  return MODEL_LIST[providerKey] || MODEL_LIST.openai;
}

export function clearModelCache(): void {
  try {
    if (existsSync(MODEL_CACHE_PATH)) {
      writeFileSync(MODEL_CACHE_PATH, '{}', 'utf8');
    }
  } catch {
    // Silently fail
  }
}
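
A usage sketch of the cache API (the key is a placeholder; names are the exports above):

    import { fetchModelsForProvider, clearModelCache } from './utils/modelCache';

    // First call hits the provider (or falls back to MODEL_LIST) and writes
    // ~/.opencommit-models.json; repeat calls within 7 days read the cache.
    const models = await fetchModelsForProvider('openai', process.env.OPENAI_API_KEY);

    // Drop the cache so the next fetch refreshes:
    clearModelCache();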