feat: add interactive setup wizard and model error handling

Add comprehensive setup command with provider selection, API key
configuration, and model selection. Include error recovery for
model-not-found scenarios with suggested alternatives and automatic
retry functionality. Update Anthropic model list with latest versions
and add provider metadata for better user experience.
Author: di-sukharev
Date: 2026-01-17 23:04:43 +03:00
Parent: ebbaff0628
Commit: d70797b864
13 changed files with 7301 additions and 4500 deletions

src/utils/errors.ts (new file, +166)

@@ -0,0 +1,166 @@
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
export class ModelNotFoundError extends Error {
public readonly modelName: string;
public readonly provider: string;
public readonly statusCode: number;
constructor(modelName: string, provider: string, statusCode: number = 404) {
super(`Model '${modelName}' not found for provider '${provider}'`);
this.name = 'ModelNotFoundError';
this.modelName = modelName;
this.provider = provider;
this.statusCode = statusCode;
}
}
export class ApiKeyMissingError extends Error {
public readonly provider: string;
constructor(provider: string) {
super(`API key is missing for provider '${provider}'`);
this.name = 'ApiKeyMissingError';
this.provider = provider;
}
}
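// Heuristically detect "model not found" failures: the typed error above,
// provider error-message patterns, or HTTP 404s on axios/fetch-style errors.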
export function isModelNotFoundError(error: unknown): boolean {
if (error instanceof ModelNotFoundError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
    // OpenAI/Anthropic error patterns
    if (
      message.includes('model') &&
      (message.includes('not found') ||
        message.includes('does not exist') ||
        message.includes('invalid'))
    ) {
      return true;
    }
// Check for 404 status in axios/fetch errors
if (
'status' in (error as any) &&
(error as any).status === 404 &&
message.includes('model')
) {
return true;
}
    // Axios-style errors: a 404 response is treated as model-not-found
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 404) {
return true;
}
}
}
return false;
}
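// Detect missing or invalid API key failures: the typed error above,
// common auth message patterns, or HTTP 401 responses.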
export function isApiKeyError(error: unknown): boolean {
if (error instanceof ApiKeyMissingError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
// Common API key error patterns
if (
message.includes('api key') ||
message.includes('apikey') ||
message.includes('authentication') ||
message.includes('unauthorized') ||
message.includes('invalid_api_key') ||
message.includes('incorrect api key')
) {
return true;
}
// Check for 401 status
if ('response' in (error as any)) {
const response = (error as any).response;
if (response?.status === 401) {
return true;
}
}
}
return false;
}
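// List alternative models for a provider from the static MODEL_LIST.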
export function getSuggestedModels(
provider: string,
failedModel: string
): string[] {
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
const models = MODEL_LIST[providerKey];
if (!models || !Array.isArray(models)) {
return [];
}
  // Suggest up to 5 alternatives, excluding the model that failed
return models.filter((m) => m !== failedModel).slice(0, 5);
}
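// Hardcoded per-provider default, flagged as '(Recommended)' in the
// recovery output below.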
export function getRecommendedModel(provider: string): string | null {
switch (provider.toLowerCase()) {
case OCO_AI_PROVIDER_ENUM.OPENAI:
return 'gpt-4o-mini';
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
return 'claude-sonnet-4-20250514';
case OCO_AI_PROVIDER_ENUM.GEMINI:
return 'gemini-1.5-flash';
case OCO_AI_PROVIDER_ENUM.GROQ:
return 'llama3-70b-8192';
case OCO_AI_PROVIDER_ENUM.MISTRAL:
return 'mistral-small-latest';
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
return 'deepseek-chat';
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
return 'openai/gpt-4o-mini';
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
return 'gpt-4o-mini';
default:
return null;
}
}
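// Build a user-facing recovery message: suggested alternatives, the
// recommended pick, and the config commands that switch models.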
export function formatErrorWithRecovery(
error: Error,
provider: string,
model: string
): string {
const suggestions = getSuggestedModels(provider, model);
const recommended = getRecommendedModel(provider);
let message = `\n${error.message}\n`;
if (suggestions.length > 0) {
message += '\nSuggested alternatives:\n';
suggestions.forEach((m, i) => {
const isRecommended = m === recommended;
message += ` ${i + 1}. ${m}${isRecommended ? ' (Recommended)' : ''}\n`;
});
}
message += '\nTo fix this, run: oco config set OCO_MODEL=<model-name>\n';
message += 'Or run: oco setup\n';
return message;
}
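
Taken together, these helpers support the recovery-and-retry flow the commit message describes. A minimal sketch of how a caller might wire them up (generateWithModel is a hypothetical stand-in; the real setup and generation commands live elsewhere in this commit):

import {
  formatErrorWithRecovery,
  getRecommendedModel,
  isModelNotFoundError
} from './utils/errors';

// Hypothetical wrapper: try the configured model once, and on a
// model-not-found failure print the recovery message and retry with
// the provider's recommended model.
async function generateWithRetry(
  provider: string,
  model: string,
  generateWithModel: (model: string) => Promise<string>
): Promise<string> {
  try {
    return await generateWithModel(model);
  } catch (error) {
    if (isModelNotFoundError(error) && error instanceof Error) {
      console.error(formatErrorWithRecovery(error, provider, model));
      const fallback = getRecommendedModel(provider);
      if (fallback && fallback !== model) {
        return generateWithModel(fallback);
      }
    }
    throw error;
  }
}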

src/utils/modelCache.ts (new file, +170)

@@ -0,0 +1,170 @@
import { existsSync, readFileSync, writeFileSync } from 'fs';
import { homedir } from 'os';
import { join as pathJoin } from 'path';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
const MODEL_CACHE_PATH = pathJoin(homedir(), '.opencommit-models.json');
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000; // 7 days
interface ModelCache {
timestamp: number;
models: Record<string, string[]>;
}
function readCache(): ModelCache | null {
try {
if (!existsSync(MODEL_CACHE_PATH)) {
return null;
}
const data = readFileSync(MODEL_CACHE_PATH, 'utf8');
return JSON.parse(data);
} catch {
return null;
}
}
function writeCache(models: Record<string, string[]>): void {
try {
const cache: ModelCache = {
timestamp: Date.now(),
models
};
writeFileSync(MODEL_CACHE_PATH, JSON.stringify(cache, null, 2), 'utf8');
} catch {
// Silently fail if we can't write cache
}
}
function isCacheValid(cache: ModelCache | null): boolean {
if (!cache) return false;
return Date.now() - cache.timestamp < CACHE_TTL_MS;
}
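// Fetch the live model list from the OpenAI API, keeping only the gpt-*
// and o1/o3/o4 families; fall back to the static list on any failure.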
export async function fetchOpenAIModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.openai.com/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openai;
}
const data = await response.json();
const models = data.data
.map((m: { id: string }) => m.id)
.filter(
(id: string) =>
id.startsWith('gpt-') ||
id.startsWith('o1') ||
id.startsWith('o3') ||
id.startsWith('o4')
)
.sort();
return models.length > 0 ? models : MODEL_LIST.openai;
} catch {
return MODEL_LIST.openai;
}
}
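// Ask a local Ollama instance for its installed models; an unreachable
// server yields an empty list rather than an error.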
export async function fetchOllamaModels(
baseUrl: string = 'http://localhost:11434'
): Promise<string[]> {
try {
const response = await fetch(`${baseUrl}/api/tags`);
if (!response.ok) {
return [];
}
const data = await response.json();
return data.models?.map((m: { name: string }) => m.name) || [];
} catch {
return [];
}
}
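// Resolve the model list for a provider: serve from the 7-day cache when
// fresh, otherwise fetch live (OpenAI, Ollama) or use the static MODEL_LIST.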
export async function fetchModelsForProvider(
provider: string,
apiKey?: string,
baseUrl?: string
): Promise<string[]> {
const cache = readCache();
// Return cached models if valid
if (isCacheValid(cache) && cache!.models[provider]) {
return cache!.models[provider];
}
let models: string[] = [];
switch (provider.toLowerCase()) {
case OCO_AI_PROVIDER_ENUM.OPENAI:
if (apiKey) {
models = await fetchOpenAIModels(apiKey);
} else {
models = MODEL_LIST.openai;
}
break;
case OCO_AI_PROVIDER_ENUM.OLLAMA:
models = await fetchOllamaModels(baseUrl);
break;
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
models = MODEL_LIST.anthropic;
break;
case OCO_AI_PROVIDER_ENUM.GEMINI:
models = MODEL_LIST.gemini;
break;
case OCO_AI_PROVIDER_ENUM.GROQ:
models = MODEL_LIST.groq;
break;
case OCO_AI_PROVIDER_ENUM.MISTRAL:
models = MODEL_LIST.mistral;
break;
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
models = MODEL_LIST.deepseek;
break;
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
models = MODEL_LIST.aimlapi;
break;
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
models = MODEL_LIST.openrouter;
break;
default:
models = MODEL_LIST.openai;
}
  // Merge this provider's list into the cache and persist it
const existingCache = cache?.models || {};
existingCache[provider] = models;
writeCache(existingCache);
return models;
}
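// Synchronous, static lookup for callers that cannot await a fetch.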
export function getModelsForProvider(provider: string): string[] {
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
return MODEL_LIST[providerKey] || MODEL_LIST.openai;
}
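// Reset the on-disk cache so the next fetch hits the network again.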
export function clearModelCache(): void {
try {
if (existsSync(MODEL_CACHE_PATH)) {
writeFileSync(MODEL_CACHE_PATH, '{}', 'utf8');
}
} catch {
// Silently fail
}
}
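
In the setup wizard, the cache-aware fetcher can back the model-selection step. A minimal sketch, assuming a hypothetical promptSelect helper standing in for whatever prompt library the wizard uses:

import { fetchModelsForProvider } from './utils/modelCache';

// Hypothetical selection step: fetch the provider's models (live for
// OpenAI/Ollama, static otherwise, cached for 7 days) and let the
// user pick one.
async function chooseModel(
  provider: string,
  promptSelect: (choices: string[]) => Promise<string>,
  apiKey?: string
): Promise<string> {
  const models = await fetchModelsForProvider(provider, apiKey);
  if (models.length === 0) {
    throw new Error(`No models available for provider '${provider}'`);
  }
  return promptSelect(models);
}

A stale or wrong cached list can be discarded with clearModelCache(), which forces the next fetchModelsForProvider call to hit the network again.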