mirror of https://github.com/di-sukharev/opencommit.git
synced 2026-01-18 01:58:14 -05:00
Compare commits

4 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 6d9fff56aa | |
| | 6ed70d0382 | |
| | 5b241ed2d0 | |
| | 8b0ee25923 | |
README.md (22 lines changed)
@@ -201,6 +201,28 @@ or for as a cheaper option:
oco config set OCO_MODEL=gpt-3.5-turbo
```

### Model Management

OpenCommit automatically fetches available models from your provider when you run `oco setup`. Models are cached for 7 days to reduce API calls.

To see available models for your current provider:

```sh
oco models
```

To refresh the model list (e.g., after new models are released):

```sh
oco models --refresh
```

To see models for a specific provider:

```sh
oco models --provider anthropic
```

### Switch to other LLM providers with a custom URL

By default OpenCommit uses [OpenAI](https://openai.com).

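The seven-day cache the new README section describes comes down to a single timestamp comparison. Below is a minimal TypeScript sketch of that validity check, assuming the `{ timestamp, models }` cache shape that `getCacheInfo` and `getCachedModels` read in the bundled diff further down; the cache path and the `CACHE_TTL_MS` constant are illustrative names, not taken from the source.

```ts
import { existsSync, readFileSync } from "fs";

// Shape implied by getCacheInfo()/getCachedModels() in the bundle below.
interface ModelCache {
  timestamp: number;
  models: Record<string, string[]>;
}

// Illustrative constants; the real path lives in the bundle as MODEL_CACHE_PATH.
const CACHE_PATH = "/tmp/opencommit-models.json";
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000; // the 7 days the README mentions

function readCache(): ModelCache | null {
  if (!existsSync(CACHE_PATH)) return null;
  try {
    return JSON.parse(readFileSync(CACHE_PATH, "utf8")) as ModelCache;
  } catch {
    return null; // a corrupt cache file is treated as missing
  }
}

function isCacheValid(cache: ModelCache | null): cache is ModelCache {
  return cache !== null && Date.now() - cache.timestamp < CACHE_TTL_MS;
}
```

`oco models --refresh` bypasses exactly this check: the diff below threads a `forceRefresh` flag through `fetchModelsForProvider` so even a still-valid cache can be skipped.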
out/cli.cjs (769 lines changed)
@@ -48509,7 +48509,7 @@ function G3(t2, e3) {
// package.json
var package_default = {
name: "opencommit",
version: "3.2.10",
version: "3.2.11",
description: "Auto-generate impressive commits in 1 second. Killing lame commits with AI \u{1F92F}\u{1F52B}",
keywords: [
"git",
@@ -53911,9 +53911,6 @@ var { AnthropicError: AnthropicError2, APIError: APIError2, APIConnectionError:
})(Anthropic || (Anthropic = {}));
var sdk_default = Anthropic;

// src/engine/anthropic.ts
init_dist2();

// node_modules/axios/lib/helpers/bind.js
function bind(fn, thisArg) {
return function wrap() {
@@ -57224,6 +57221,51 @@ var {
} = axios_default;

// src/utils/errors.ts
var PROVIDER_BILLING_URLS = {
["anthropic" /* ANTHROPIC */]: "https://console.anthropic.com/settings/billing",
["openai" /* OPENAI */]: "https://platform.openai.com/settings/organization/billing",
["gemini" /* GEMINI */]: "https://aistudio.google.com/app/plan",
["groq" /* GROQ */]: "https://console.groq.com/settings/billing",
["mistral" /* MISTRAL */]: "https://console.mistral.ai/billing/",
["deepseek" /* DEEPSEEK */]: "https://platform.deepseek.com/usage",
["openrouter" /* OPENROUTER */]: "https://openrouter.ai/credits",
["aimlapi" /* AIMLAPI */]: "https://aimlapi.com/app/billing",
["azure" /* AZURE */]: "https://portal.azure.com/#view/Microsoft_Azure_CostManagement",
["ollama" /* OLLAMA */]: null,
["mlx" /* MLX */]: null,
["flowise" /* FLOWISE */]: null,
["test" /* TEST */]: null
};
var InsufficientCreditsError = class extends Error {
constructor(provider, message) {
super(message || `Insufficient credits or quota for provider '${provider}'`);
this.name = "InsufficientCreditsError";
this.provider = provider;
}
};
var RateLimitError3 = class extends Error {
constructor(provider, retryAfter, message) {
super(message || `Rate limit exceeded for provider '${provider}'`);
this.name = "RateLimitError";
this.provider = provider;
this.retryAfter = retryAfter;
}
};
var ServiceUnavailableError = class extends Error {
constructor(provider, statusCode = 503, message) {
super(message || `Service unavailable for provider '${provider}'`);
this.name = "ServiceUnavailableError";
this.provider = provider;
this.statusCode = statusCode;
}
};
var AuthenticationError3 = class extends Error {
constructor(provider, message) {
super(message || `Authentication failed for provider '${provider}'`);
this.name = "AuthenticationError";
this.provider = provider;
}
};
var ModelNotFoundError = class extends Error {
constructor(modelName, provider, statusCode = 404) {
super(`Model '${modelName}' not found for provider '${provider}'`);
@@ -57233,6 +57275,13 @@ var ModelNotFoundError = class extends Error {
this.statusCode = statusCode;
}
};
var ApiKeyMissingError = class extends Error {
constructor(provider) {
super(`API key is missing for provider '${provider}'`);
this.name = "ApiKeyMissingError";
this.provider = provider;
}
};
function isModelNotFoundError(error) {
if (error instanceof ModelNotFoundError) {
return true;
@@ -57257,6 +57306,24 @@ function isModelNotFoundError(error) {
}
return false;
}
function isApiKeyError(error) {
if (error instanceof ApiKeyMissingError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
if (message.includes("api key") || message.includes("apikey") || message.includes("authentication") || message.includes("unauthorized") || message.includes("invalid_api_key") || message.includes("incorrect api key")) {
return true;
}
if ("response" in error) {
const response = error.response;
if (response?.status === 401) {
return true;
}
}
}
return false;
}
function getSuggestedModels(provider, failedModel) {
const providerKey = provider.toLowerCase();
const models = MODEL_LIST[providerKey];
@@ -57265,6 +57332,276 @@ function getSuggestedModels(provider, failedModel) {
}
return models.filter((m5) => m5 !== failedModel).slice(0, 5);
}
function isInsufficientCreditsError(error) {
if (error instanceof InsufficientCreditsError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
if (message.includes("insufficient") || message.includes("credit") || message.includes("quota") || message.includes("balance") || message.includes("billing") || message.includes("payment") || message.includes("exceeded") || message.includes("limit reached") || message.includes("no remaining")) {
return true;
}
if ("status" in error && error.status === 402) {
return true;
}
if ("response" in error) {
const response = error.response;
if (response?.status === 402) {
return true;
}
}
}
return false;
}
function isRateLimitError(error) {
if (error instanceof RateLimitError3) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
if (message.includes("rate limit") || message.includes("rate_limit") || message.includes("too many requests") || message.includes("throttle")) {
return true;
}
if ("status" in error && error.status === 429) {
return true;
}
if ("response" in error) {
const response = error.response;
if (response?.status === 429) {
return true;
}
}
}
return false;
}
function isServiceUnavailableError(error) {
if (error instanceof ServiceUnavailableError) {
return true;
}
if (error instanceof Error) {
const message = error.message.toLowerCase();
if (message.includes("service unavailable") || message.includes("server error") || message.includes("internal error") || message.includes("temporarily unavailable") || message.includes("overloaded")) {
return true;
}
const status = error.status || error.response?.status;
if (status && status >= 500 && status < 600) {
return true;
}
}
return false;
}
function formatUserFriendlyError(error, provider) {
const billingUrl = PROVIDER_BILLING_URLS[provider] || null;
if (error instanceof InsufficientCreditsError) {
return {
title: "Insufficient Credits",
message: `Your ${provider} account has insufficient credits or quota.`,
helpUrl: billingUrl,
suggestion: "Add credits to your account to continue using the service."
};
}
if (error instanceof RateLimitError3) {
const retryMsg = error.retryAfter ? `Please wait ${error.retryAfter} seconds before retrying.` : "Please wait a moment before retrying.";
return {
title: "Rate Limit Exceeded",
message: `You've made too many requests to ${provider}.`,
helpUrl: billingUrl,
suggestion: retryMsg
};
}
if (error instanceof ServiceUnavailableError) {
return {
title: "Service Unavailable",
message: `The ${provider} service is temporarily unavailable.`,
helpUrl: null,
suggestion: "Please try again in a few moments."
};
}
if (error instanceof AuthenticationError3) {
return {
title: "Authentication Failed",
message: `Your ${provider} API key is invalid or expired.`,
helpUrl: billingUrl,
suggestion: "Run `oco setup` to configure a valid API key."
};
}
if (error instanceof ModelNotFoundError) {
return {
title: "Model Not Found",
message: `The model '${error.modelName}' is not available for ${provider}.`,
helpUrl: null,
suggestion: "Run `oco setup` to select a valid model."
};
}
if (isInsufficientCreditsError(error)) {
return {
title: "Insufficient Credits",
message: `Your ${provider} account has insufficient credits or quota.`,
helpUrl: billingUrl,
suggestion: "Add credits to your account to continue using the service."
};
}
if (isRateLimitError(error)) {
return {
title: "Rate Limit Exceeded",
message: `You've made too many requests to ${provider}.`,
helpUrl: billingUrl,
suggestion: "Please wait a moment before retrying."
};
}
if (isServiceUnavailableError(error)) {
return {
title: "Service Unavailable",
message: `The ${provider} service is temporarily unavailable.`,
helpUrl: null,
suggestion: "Please try again in a few moments."
};
}
if (isApiKeyError(error)) {
return {
title: "Authentication Failed",
message: `Your ${provider} API key is invalid or expired.`,
helpUrl: billingUrl,
suggestion: "Run `oco setup` to configure a valid API key."
};
}
if (isModelNotFoundError(error)) {
const model = error.modelName || error.model || "unknown";
return {
title: "Model Not Found",
message: `The model '${model}' is not available for ${provider}.`,
helpUrl: null,
suggestion: "Run `oco setup` to select a valid model."
};
}
const errorMessage = error instanceof Error ? error.message : String(error);
return {
title: "Error",
message: errorMessage,
helpUrl: null,
suggestion: "Run `oco setup` to reconfigure or check your settings."
};
}
function printFormattedError(formatted) {
let output = `
${source_default.red("\u2716")} ${source_default.bold.red(formatted.title)}
`;
output += ` ${formatted.message}
`;
if (formatted.helpUrl) {
output += `
${source_default.cyan("Help:")} ${source_default.underline(formatted.helpUrl)}
`;
}
if (formatted.suggestion) {
output += `
${source_default.yellow("Suggestion:")} ${formatted.suggestion}
`;
}
return output;
}

// src/utils/engineErrorHandler.ts
function getStatusCode(error) {
if (typeof error?.status === "number") {
return error.status;
}
if (axios_default.isAxiosError(error)) {
return error.response?.status ?? null;
}
if (typeof error?.response?.status === "number") {
return error.response.status;
}
return null;
}
function getRetryAfter(error) {
const headers = error?.response?.headers;
if (headers) {
const retryAfter = headers["retry-after"] || headers["Retry-After"];
if (retryAfter) {
const seconds = parseInt(retryAfter, 10);
if (!isNaN(seconds)) {
return seconds;
}
}
}
return void 0;
}
function extractErrorMessage(error) {
if (error instanceof Error) {
return error.message;
}
const apiError = error?.response?.data?.error;
if (apiError) {
if (typeof apiError === "string") {
return apiError;
}
if (apiError.message) {
return apiError.message;
}
}
const errorData = error?.error;
if (errorData) {
if (typeof errorData === "string") {
return errorData;
}
if (errorData.message) {
return errorData.message;
}
}
if (typeof error === "string") {
return error;
}
return "An unknown error occurred";
}
function isModelNotFoundMessage(message) {
const lowerMessage = message.toLowerCase();
return lowerMessage.includes("model") && (lowerMessage.includes("not found") || lowerMessage.includes("does not exist") || lowerMessage.includes("invalid") || lowerMessage.includes("pull")) || lowerMessage.includes("does_not_exist");
}
function isInsufficientCreditsMessage(message) {
const lowerMessage = message.toLowerCase();
return lowerMessage.includes("insufficient") || lowerMessage.includes("credit") || lowerMessage.includes("quota") || lowerMessage.includes("balance too low") || lowerMessage.includes("billing") || lowerMessage.includes("payment required") || lowerMessage.includes("exceeded");
}
function normalizeEngineError(error, provider, model) {
if (error instanceof ModelNotFoundError || error instanceof AuthenticationError3 || error instanceof InsufficientCreditsError || error instanceof RateLimitError3 || error instanceof ServiceUnavailableError) {
return error;
}
const statusCode = getStatusCode(error);
const message = extractErrorMessage(error);
switch (statusCode) {
case 401:
return new AuthenticationError3(provider, message);
case 402:
return new InsufficientCreditsError(provider, message);
case 404:
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
return error instanceof Error ? error : new Error(message);
case 429:
const retryAfter = getRetryAfter(error);
return new RateLimitError3(provider, retryAfter, message);
case 500:
case 502:
case 503:
case 504:
return new ServiceUnavailableError(provider, statusCode, message);
}
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
if (isInsufficientCreditsMessage(message)) {
return new InsufficientCreditsError(provider, message);
}
const lowerMessage = message.toLowerCase();
if (lowerMessage.includes("rate limit") || lowerMessage.includes("rate_limit") || lowerMessage.includes("too many requests")) {
return new RateLimitError3(provider, void 0, message);
}
if (lowerMessage.includes("unauthorized") || lowerMessage.includes("api key") || lowerMessage.includes("apikey") || lowerMessage.includes("authentication") || lowerMessage.includes("invalid_api_key")) {
return new AuthenticationError3(provider, message);
}
return error instanceof Error ? error : new Error(message);
}

// src/utils/removeContentTags.ts
function removeContentTags(content, tag) {
@@ -57342,25 +57679,7 @@ var AnthropicEngine = class {
let content = message;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (err.message?.toLowerCase().includes("model") && (err.message?.toLowerCase().includes("not found") || err.message?.toLowerCase().includes("does not exist") || err.message?.toLowerCase().includes("invalid"))) {
throw new ModelNotFoundError(this.config.model, "anthropic", 404);
}
if ("status" in error && error.status === 404) {
throw new ModelNotFoundError(this.config.model, "anthropic", 404);
}
ce(`${source_default.red("\u2716")} ${err?.message || err}`);
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const anthropicAiError = error.response.data.error;
if (anthropicAiError?.message) ce(anthropicAiError.message);
ce(
"For help look into README https://github.com/di-sukharev/opencommit#setup"
);
}
if (axios_default.isAxiosError(error) && error.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "anthropic", 404);
}
throw err;
throw normalizeEngineError(error, "anthropic", this.config.model);
}
};
this.config = config7;
@@ -61176,7 +61495,6 @@ var OpenAIClient = class {
};

// src/engine/azure.ts
init_dist2();
var AzureEngine = class {
constructor(config7) {
this.generateCommitMessage = async (messages) => {
@@ -61196,17 +61514,7 @@ var AzureEngine = class {
let content = message?.content;
return removeContentTags(content, "think");
} catch (error) {
ce(`${source_default.red("\u2716")} ${this.config.model}`);
const err = error;
ce(`${source_default.red("\u2716")} ${JSON.stringify(error)}`);
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const openAiError = error.response.data.error;
if (openAiError?.message) ce(openAiError.message);
ce(
"For help look into README https://github.com/di-sukharev/opencommit#setup"
);
}
throw err;
throw normalizeEngineError(error, "azure", this.config.model);
}
};
this.config = config7;
@@ -61240,9 +61548,8 @@ var FlowiseEngine = class {
const message = response.data;
let content = message?.text;
return removeContentTags(content, "think");
} catch (err) {
const message = err.response?.data?.error ?? err.message;
throw new Error("local model issues. details: " + message);
} catch (error) {
throw normalizeEngineError(error, "flowise", this.config.model);
}
}
};
@@ -62102,18 +62409,7 @@ var GeminiEngine = class {
const content = result.response.text();
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (err.message?.toLowerCase().includes("model") && (err.message?.toLowerCase().includes("not found") || err.message?.toLowerCase().includes("does not exist") || err.message?.toLowerCase().includes("invalid"))) {
throw new ModelNotFoundError(this.config.model, "gemini", 404);
}
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const geminiError = error.response.data.error;
if (geminiError) throw new Error(geminiError?.message);
}
if (axios_default.isAxiosError(error) && error.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "gemini", 404);
}
throw err;
throw normalizeEngineError(error, "gemini", this.config.model);
}
}
};
@@ -62146,15 +62442,8 @@ var OllamaEngine = class {
const { message } = response.data;
let content = message?.content;
return removeContentTags(content, "think");
} catch (err) {
const message = err.response?.data?.error ?? err.message;
if (message?.toLowerCase().includes("model") && (message?.toLowerCase().includes("not found") || message?.toLowerCase().includes("does not exist") || message?.toLowerCase().includes("pull"))) {
throw new ModelNotFoundError(this.config.model, "ollama", 404);
}
if (err.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "ollama", 404);
}
throw new Error(`Ollama provider error: ${message}`);
} catch (error) {
throw normalizeEngineError(error, "ollama", this.config.model);
}
}
};
@@ -62166,7 +62455,7 @@ __export(error_exports2, {
APIConnectionTimeoutError: () => APIConnectionTimeoutError3,
APIError: () => APIError3,
APIUserAbortError: () => APIUserAbortError3,
AuthenticationError: () => AuthenticationError3,
AuthenticationError: () => AuthenticationError4,
BadRequestError: () => BadRequestError3,
ConflictError: () => ConflictError3,
ContentFilterFinishReasonError: () => ContentFilterFinishReasonError,
@@ -62175,7 +62464,7 @@ __export(error_exports2, {
NotFoundError: () => NotFoundError3,
OpenAIError: () => OpenAIError,
PermissionDeniedError: () => PermissionDeniedError3,
RateLimitError: () => RateLimitError3,
RateLimitError: () => RateLimitError4,
UnprocessableEntityError: () => UnprocessableEntityError3
});

@@ -63447,7 +63736,7 @@ var APIError3 = class _APIError extends OpenAIError {
return new BadRequestError3(status, error, message, headers);
}
if (status === 401) {
return new AuthenticationError3(status, error, message, headers);
return new AuthenticationError4(status, error, message, headers);
}
if (status === 403) {
return new PermissionDeniedError3(status, error, message, headers);
@@ -63462,7 +63751,7 @@ var APIError3 = class _APIError extends OpenAIError {
return new UnprocessableEntityError3(status, error, message, headers);
}
if (status === 429) {
return new RateLimitError3(status, error, message, headers);
return new RateLimitError4(status, error, message, headers);
}
if (status >= 500) {
return new InternalServerError3(status, error, message, headers);
@@ -63495,7 +63784,7 @@ var BadRequestError3 = class extends APIError3 {
this.status = 400;
}
};
var AuthenticationError3 = class extends APIError3 {
var AuthenticationError4 = class extends APIError3 {
constructor() {
super(...arguments);
this.status = 401;
@@ -63525,7 +63814,7 @@ var UnprocessableEntityError3 = class extends APIError3 {
this.status = 422;
}
};
var RateLimitError3 = class extends APIError3 {
var RateLimitError4 = class extends APIError3 {
constructor() {
super(...arguments);
this.status = 429;
@@ -66834,15 +67123,15 @@ OpenAI.APIConnectionTimeoutError = APIConnectionTimeoutError3;
OpenAI.APIUserAbortError = APIUserAbortError3;
OpenAI.NotFoundError = NotFoundError3;
OpenAI.ConflictError = ConflictError3;
OpenAI.RateLimitError = RateLimitError3;
OpenAI.RateLimitError = RateLimitError4;
OpenAI.BadRequestError = BadRequestError3;
OpenAI.AuthenticationError = AuthenticationError3;
OpenAI.AuthenticationError = AuthenticationError4;
OpenAI.InternalServerError = InternalServerError3;
OpenAI.PermissionDeniedError = PermissionDeniedError3;
OpenAI.UnprocessableEntityError = UnprocessableEntityError3;
OpenAI.toFile = toFile2;
OpenAI.fileFromPath = fileFromPath4;
var { OpenAIError: OpenAIError2, APIError: APIError4, APIConnectionError: APIConnectionError4, APIConnectionTimeoutError: APIConnectionTimeoutError4, APIUserAbortError: APIUserAbortError4, NotFoundError: NotFoundError4, ConflictError: ConflictError4, RateLimitError: RateLimitError4, BadRequestError: BadRequestError4, AuthenticationError: AuthenticationError4, InternalServerError: InternalServerError4, PermissionDeniedError: PermissionDeniedError4, UnprocessableEntityError: UnprocessableEntityError4 } = error_exports2;
var { OpenAIError: OpenAIError2, APIError: APIError4, APIConnectionError: APIConnectionError4, APIConnectionTimeoutError: APIConnectionTimeoutError4, APIUserAbortError: APIUserAbortError4, NotFoundError: NotFoundError4, ConflictError: ConflictError4, RateLimitError: RateLimitError5, BadRequestError: BadRequestError4, AuthenticationError: AuthenticationError5, InternalServerError: InternalServerError4, PermissionDeniedError: PermissionDeniedError4, UnprocessableEntityError: UnprocessableEntityError4 } = error_exports2;
(function(OpenAI2) {
OpenAI2.Page = Page;
OpenAI2.CursorPage = CursorPage;
@@ -66883,21 +67172,7 @@ var OpenAiEngine = class {
let content = message?.content;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (err.message?.toLowerCase().includes("model") && (err.message?.toLowerCase().includes("not found") || err.message?.toLowerCase().includes("does not exist") || err.message?.toLowerCase().includes("invalid"))) {
throw new ModelNotFoundError(this.config.model, "openai", 404);
}
if ("status" in error && error.status === 404) {
throw new ModelNotFoundError(this.config.model, "openai", 404);
}
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const openAiError = error.response.data.error;
if (openAiError) throw new Error(openAiError.message);
}
if (axios_default.isAxiosError(error) && error.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "openai", 404);
}
throw err;
throw normalizeEngineError(error, "openai", this.config.model);
}
};
this.config = config7;
@@ -66941,12 +67216,7 @@ var MistralAiEngine = class {
let content = message.content;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const mistralError = error.response.data.error;
if (mistralError) throw new Error(mistralError.message);
}
throw err;
throw normalizeEngineError(error, "mistral", this.config.model);
}
};
this.config = config7;
@@ -66995,9 +67265,8 @@ var MLXEngine = class {
const message = choices[0].message;
let content = message?.content;
return removeContentTags(content, "think");
} catch (err) {
const message = err.response?.data?.error ?? err.message;
throw new Error(`MLX provider error: ${message}`);
} catch (error) {
throw normalizeEngineError(error, "mlx", this.config.model);
}
}
};
@@ -67027,12 +67296,7 @@ var DeepseekEngine = class extends OpenAiEngine {
let content = message?.content;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const openAiError = error.response.data.error;
if (openAiError) throw new Error(openAiError.message);
}
throw err;
throw normalizeEngineError(error, "deepseek", this.config.model);
}
};
}
@@ -67051,12 +67315,7 @@ var AimlApiEngine = class {
const message = response.data.choices?.[0]?.message;
return message?.content ?? null;
} catch (error) {
const err = error;
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const apiError = error.response.data.error;
if (apiError) throw new Error(apiError.message);
}
throw err;
throw normalizeEngineError(error, "aimlapi", this.config.model);
}
};
this.client = axios_default.create({
@@ -67086,12 +67345,7 @@ var OpenRouterEngine = class {
let content = message?.content;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const openRouterError = error.response.data.error;
if (openRouterError) throw new Error(openRouterError.message);
}
throw err;
throw normalizeEngineError(error, "openrouter", this.config.model);
}
};
this.client = axios_default.create({
@@ -68153,9 +68407,10 @@ ${source_default.grey("\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2
commitGenerationSpinner.stop(
`${source_default.red("\u2716")} Failed to generate the commit message`
);
console.log(error);
const err = error;
ce(`${source_default.red("\u2716")} ${err?.message || err}`);
const errorConfig = getConfig();
const provider = errorConfig.OCO_AI_PROVIDER || "openai";
const formatted = formatUserFriendlyError(error, provider);
ce(printFormattedError(formatted));
process.exit(1);
}
};
@@ -68458,9 +68713,97 @@ async function fetchOllamaModels(baseUrl = "http://localhost:11434") {
return [];
}
}
async function fetchModelsForProvider(provider, apiKey, baseUrl) {
async function fetchAnthropicModels(apiKey) {
try {
const response = await fetch("https://api.anthropic.com/v1/models", {
headers: {
"x-api-key": apiKey,
"anthropic-version": "2023-06-01"
}
});
if (!response.ok) {
return MODEL_LIST.anthropic;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).filter((id) => id.startsWith("claude-")).sort();
return models && models.length > 0 ? models : MODEL_LIST.anthropic;
} catch {
return MODEL_LIST.anthropic;
}
}
async function fetchMistralModels(apiKey) {
try {
const response = await fetch("https://api.mistral.ai/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.mistral;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.mistral;
} catch {
return MODEL_LIST.mistral;
}
}
async function fetchGroqModels(apiKey) {
try {
const response = await fetch("https://api.groq.com/openai/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.groq;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.groq;
} catch {
return MODEL_LIST.groq;
}
}
async function fetchOpenRouterModels(apiKey) {
try {
const response = await fetch("https://openrouter.ai/api/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openrouter;
}
const data = await response.json();
const models = data.data?.filter(
(m5) => m5.context_length && m5.context_length > 0
).map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.openrouter;
} catch {
return MODEL_LIST.openrouter;
}
}
async function fetchDeepSeekModels(apiKey) {
try {
const response = await fetch("https://api.deepseek.com/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.deepseek;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
} catch {
return MODEL_LIST.deepseek;
}
}
async function fetchModelsForProvider(provider, apiKey, baseUrl, forceRefresh = false) {
const cache = readCache();
if (isCacheValid(cache) && cache.models[provider]) {
if (!forceRefresh && isCacheValid(cache) && cache.models[provider]) {
return cache.models[provider];
}
let models = [];
@@ -68476,25 +68819,45 @@ async function fetchModelsForProvider(provider, apiKey, baseUrl) {
models = await fetchOllamaModels(baseUrl);
break;
case "anthropic" /* ANTHROPIC */:
models = MODEL_LIST.anthropic;
if (apiKey) {
models = await fetchAnthropicModels(apiKey);
} else {
models = MODEL_LIST.anthropic;
}
break;
case "gemini" /* GEMINI */:
models = MODEL_LIST.gemini;
break;
case "groq" /* GROQ */:
models = MODEL_LIST.groq;
if (apiKey) {
models = await fetchGroqModels(apiKey);
} else {
models = MODEL_LIST.groq;
}
break;
case "mistral" /* MISTRAL */:
models = MODEL_LIST.mistral;
if (apiKey) {
models = await fetchMistralModels(apiKey);
} else {
models = MODEL_LIST.mistral;
}
break;
case "deepseek" /* DEEPSEEK */:
models = MODEL_LIST.deepseek;
if (apiKey) {
models = await fetchDeepSeekModels(apiKey);
} else {
models = MODEL_LIST.deepseek;
}
break;
case "aimlapi" /* AIMLAPI */:
models = MODEL_LIST.aimlapi;
break;
case "openrouter" /* OPENROUTER */:
models = MODEL_LIST.openrouter;
if (apiKey) {
models = await fetchOpenRouterModels(apiKey);
} else {
models = MODEL_LIST.openrouter;
}
break;
default:
models = MODEL_LIST.openai;
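All five provider fetchers added in the hunk above share one shape: query the provider's models endpoint, and on any HTTP, network, or parse failure fall back to the static `MODEL_LIST` entry so setup keeps working offline. A generic TypeScript sketch of that pattern, assuming the bearer-token header and `{ data: [{ id }] }` response shape shown above; `fetchModelsWithFallback` is an illustrative name, not a function in the bundle:

```ts
// Generic form of the fetchMistralModels/fetchGroqModels/... pattern above.
async function fetchModelsWithFallback(
  url: string,
  apiKey: string,
  fallback: string[]
): Promise<string[]> {
  try {
    const response = await fetch(url, {
      headers: { Authorization: `Bearer ${apiKey}` }
    });
    if (!response.ok) return fallback; // HTTP error: keep the static list
    const data = (await response.json()) as { data?: { id: string }[] };
    const models = data.data?.map((m) => m.id).sort();
    return models && models.length > 0 ? models : fallback;
  } catch {
    return fallback; // network or JSON failure: keep the static list
  }
}
```

The Anthropic variant only differs in its headers (`x-api-key` plus `anthropic-version`) and an extra `claude-` prefix filter, as the diff shows.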
@@ -68504,6 +68867,31 @@ async function fetchModelsForProvider(provider, apiKey, baseUrl) {
writeCache(existingCache);
return models;
}
function clearModelCache() {
try {
if ((0, import_fs5.existsSync)(MODEL_CACHE_PATH)) {
(0, import_fs5.writeFileSync)(MODEL_CACHE_PATH, "{}", "utf8");
}
} catch {
}
}
function getCacheInfo() {
const cache = readCache();
if (!cache) {
return { timestamp: null, providers: [] };
}
return {
timestamp: cache.timestamp,
providers: Object.keys(cache.models || {})
};
}
function getCachedModels(provider) {
const cache = readCache();
if (!cache || !cache.models[provider]) {
return null;
}
return cache.models[provider];
}

// src/commands/setup.ts
var PROVIDER_DISPLAY_NAMES = {
@@ -68582,17 +68970,42 @@ ${source_default.dim(` Get your key at: ${url2}`)}`;
}
});
}
function formatCacheAge(timestamp) {
if (!timestamp) return "";
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1e3 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1e3 * 60 * 60));
if (days > 0) {
return `${days} day${days === 1 ? "" : "s"} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? "" : "s"} ago`;
}
return "just now";
}
async function selectModel(provider, apiKey) {
const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(" (")[0] || provider;
const loadingSpinner = le();
loadingSpinner.start("Fetching available models...");
loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
let models = [];
let usedFallback = false;
try {
models = await fetchModelsForProvider(provider, apiKey);
} catch {
usedFallback = true;
const providerKey = provider.toLowerCase();
models = MODEL_LIST[providerKey] || [];
}
loadingSpinner.stop("Models loaded");
const cacheInfo = getCacheInfo();
const cacheAge = formatCacheAge(cacheInfo.timestamp);
if (usedFallback) {
loadingSpinner.stop(
source_default.yellow("Could not fetch models from API. Using default list.")
);
} else if (cacheAge) {
loadingSpinner.stop(`Models loaded ${source_default.dim(`(cached ${cacheAge})`)}`);
} else {
loadingSpinner.stop("Models loaded");
}
if (models.length === 0) {
if (NO_API_KEY_PROVIDERS.includes(provider)) {
return await J4({
@@ -68844,6 +69257,116 @@ var setupCommand = G3(
}
);

// src/commands/models.ts
init_dist2();
function formatCacheAge2(timestamp) {
if (!timestamp) return "never";
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1e3 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1e3 * 60 * 60));
const minutes = Math.floor(ageMs / (1e3 * 60));
if (days > 0) {
return `${days} day${days === 1 ? "" : "s"} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? "" : "s"} ago`;
} else if (minutes > 0) {
return `${minutes} minute${minutes === 1 ? "" : "s"} ago`;
}
return "just now";
}
async function listModels(provider, useCache = true) {
const config7 = getConfig();
const apiKey = config7.OCO_API_KEY;
const currentModel = config7.OCO_MODEL;
let models = [];
if (useCache) {
const cached = getCachedModels(provider);
if (cached) {
models = cached;
}
}
if (models.length === 0) {
const providerKey = provider.toLowerCase();
models = MODEL_LIST[providerKey] || [];
}
console.log(`
${source_default.bold("Available models for")} ${source_default.cyan(provider)}:
`);
if (models.length === 0) {
console.log(source_default.dim(" No models found"));
} else {
models.forEach((model) => {
const isCurrent = model === currentModel;
const prefix = isCurrent ? source_default.green("* ") : " ";
const label = isCurrent ? source_default.green(model) : model;
console.log(`${prefix}${label}`);
});
}
console.log("");
}
async function refreshModels(provider) {
const config7 = getConfig();
const apiKey = config7.OCO_API_KEY;
const loadingSpinner = le();
loadingSpinner.start(`Fetching models from ${provider}...`);
clearModelCache();
try {
const models = await fetchModelsForProvider(provider, apiKey, void 0, true);
loadingSpinner.stop(`${source_default.green("+")} Fetched ${models.length} models`);
await listModels(provider, true);
} catch (error) {
loadingSpinner.stop(source_default.red("Failed to fetch models"));
console.error(source_default.red(`Error: ${error instanceof Error ? error.message : "Unknown error"}`));
}
}
var modelsCommand = G3(
{
name: "models" /* models */,
help: {
description: "List and manage cached models for your AI provider"
},
flags: {
refresh: {
type: Boolean,
alias: "r",
description: "Clear cache and re-fetch models from the provider",
default: false
},
provider: {
type: String,
alias: "p",
description: "Specify provider (defaults to current OCO_AI_PROVIDER)"
}
}
},
async ({ flags }) => {
const config7 = getConfig();
const provider = flags.provider || config7.OCO_AI_PROVIDER || "openai" /* OPENAI */;
ae(source_default.bgCyan(" OpenCommit Models "));
const cacheInfo = getCacheInfo();
if (cacheInfo.timestamp) {
console.log(
source_default.dim(` Cache last updated: ${formatCacheAge2(cacheInfo.timestamp)}`)
);
if (cacheInfo.providers.length > 0) {
console.log(
source_default.dim(` Cached providers: ${cacheInfo.providers.join(", ")}`)
);
}
} else {
console.log(source_default.dim(" No cached models"));
}
if (flags.refresh) {
await refreshModels(provider);
} else {
await listModels(provider);
}
ce(
`Run ${source_default.cyan("oco models --refresh")} to update the model list`
);
}
);

// src/utils/checkIsLatestVersion.ts
init_dist2();

@@ -69036,7 +69559,7 @@ Z2(
{
version: package_default.version,
name: "opencommit",
commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand],
commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
flags: {
fgm: {
type: Boolean,

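The net effect of the hunks so far: every engine's ad hoc catch block collapses to `throw normalizeEngineError(error, provider, model)`, and the commit flow renders whatever comes out through `formatUserFriendlyError` plus `printFormattedError`. A TypeScript sketch of how a caller composes those pieces, with `declare` stubs standing in for the functions defined in the diff; this illustrates the flow, it is not an excerpt from the bundle:

```ts
// Stubs for functions defined in src/utils/errors.ts (see the hunks above).
declare function formatUserFriendlyError(
  error: unknown,
  provider: string
): { title: string; message: string; helpUrl: string | null; suggestion: string };
declare function printFormattedError(
  formatted: ReturnType<typeof formatUserFriendlyError>
): string;
declare const process: { exit(code: number): never };

// Sketch: any engine error, normalized or raw, becomes one friendly report.
async function runWithFriendlyErrors<T>(
  provider: string,
  task: () => Promise<T>
): Promise<T> {
  try {
    return await task();
  } catch (error) {
    // Engines already throw normalized errors (InsufficientCreditsError,
    // RateLimitError, ...); the formatter also sniffs raw errors by message
    // and status code, so unnormalized errors still get a useful title,
    // billing URL, and suggestion.
    console.error(printFormattedError(formatUserFriendlyError(error, provider)));
    process.exit(1);
  }
}
```

The remaining hunks below apply the same changes to a second copy of these modules that appears later in the bundled out/cli.cjs.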
@@ -74889,9 +74889,6 @@ var { AnthropicError: AnthropicError2, APIError: APIError2, APIConnectionError:
})(Anthropic || (Anthropic = {}));
var sdk_default = Anthropic;

// src/engine/anthropic.ts
init_dist2();

// node_modules/axios/lib/helpers/bind.js
function bind(fn, thisArg) {
return function wrap() {
@@ -78202,6 +78199,51 @@ var {
} = axios_default;

// src/utils/errors.ts
var PROVIDER_BILLING_URLS = {
["anthropic" /* ANTHROPIC */]: "https://console.anthropic.com/settings/billing",
["openai" /* OPENAI */]: "https://platform.openai.com/settings/organization/billing",
["gemini" /* GEMINI */]: "https://aistudio.google.com/app/plan",
["groq" /* GROQ */]: "https://console.groq.com/settings/billing",
["mistral" /* MISTRAL */]: "https://console.mistral.ai/billing/",
["deepseek" /* DEEPSEEK */]: "https://platform.deepseek.com/usage",
["openrouter" /* OPENROUTER */]: "https://openrouter.ai/credits",
["aimlapi" /* AIMLAPI */]: "https://aimlapi.com/app/billing",
["azure" /* AZURE */]: "https://portal.azure.com/#view/Microsoft_Azure_CostManagement",
["ollama" /* OLLAMA */]: null,
["mlx" /* MLX */]: null,
["flowise" /* FLOWISE */]: null,
["test" /* TEST */]: null
};
var InsufficientCreditsError = class extends Error {
constructor(provider, message) {
super(message || `Insufficient credits or quota for provider '${provider}'`);
this.name = "InsufficientCreditsError";
this.provider = provider;
}
};
var RateLimitError3 = class extends Error {
constructor(provider, retryAfter, message) {
super(message || `Rate limit exceeded for provider '${provider}'`);
this.name = "RateLimitError";
this.provider = provider;
this.retryAfter = retryAfter;
}
};
var ServiceUnavailableError = class extends Error {
constructor(provider, statusCode = 503, message) {
super(message || `Service unavailable for provider '${provider}'`);
this.name = "ServiceUnavailableError";
this.provider = provider;
this.statusCode = statusCode;
}
};
var AuthenticationError3 = class extends Error {
constructor(provider, message) {
super(message || `Authentication failed for provider '${provider}'`);
this.name = "AuthenticationError";
this.provider = provider;
}
};
var ModelNotFoundError = class extends Error {
constructor(modelName, provider, statusCode = 404) {
super(`Model '${modelName}' not found for provider '${provider}'`);
@@ -78244,6 +78286,108 @@ function getSuggestedModels(provider, failedModel) {
return models.filter((m4) => m4 !== failedModel).slice(0, 5);
}

// src/utils/engineErrorHandler.ts
function getStatusCode(error) {
if (typeof error?.status === "number") {
return error.status;
}
if (axios_default.isAxiosError(error)) {
return error.response?.status ?? null;
}
if (typeof error?.response?.status === "number") {
return error.response.status;
}
return null;
}
function getRetryAfter(error) {
const headers = error?.response?.headers;
if (headers) {
const retryAfter = headers["retry-after"] || headers["Retry-After"];
if (retryAfter) {
const seconds = parseInt(retryAfter, 10);
if (!isNaN(seconds)) {
return seconds;
}
}
}
return void 0;
}
function extractErrorMessage(error) {
if (error instanceof Error) {
return error.message;
}
const apiError = error?.response?.data?.error;
if (apiError) {
if (typeof apiError === "string") {
return apiError;
}
if (apiError.message) {
return apiError.message;
}
}
const errorData = error?.error;
if (errorData) {
if (typeof errorData === "string") {
return errorData;
}
if (errorData.message) {
return errorData.message;
}
}
if (typeof error === "string") {
return error;
}
return "An unknown error occurred";
}
function isModelNotFoundMessage(message) {
const lowerMessage = message.toLowerCase();
return lowerMessage.includes("model") && (lowerMessage.includes("not found") || lowerMessage.includes("does not exist") || lowerMessage.includes("invalid") || lowerMessage.includes("pull")) || lowerMessage.includes("does_not_exist");
}
function isInsufficientCreditsMessage(message) {
const lowerMessage = message.toLowerCase();
return lowerMessage.includes("insufficient") || lowerMessage.includes("credit") || lowerMessage.includes("quota") || lowerMessage.includes("balance too low") || lowerMessage.includes("billing") || lowerMessage.includes("payment required") || lowerMessage.includes("exceeded");
}
function normalizeEngineError(error, provider, model) {
if (error instanceof ModelNotFoundError || error instanceof AuthenticationError3 || error instanceof InsufficientCreditsError || error instanceof RateLimitError3 || error instanceof ServiceUnavailableError) {
return error;
}
const statusCode = getStatusCode(error);
const message = extractErrorMessage(error);
switch (statusCode) {
case 401:
return new AuthenticationError3(provider, message);
case 402:
return new InsufficientCreditsError(provider, message);
case 404:
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
return error instanceof Error ? error : new Error(message);
case 429:
const retryAfter = getRetryAfter(error);
return new RateLimitError3(provider, retryAfter, message);
case 500:
case 502:
case 503:
case 504:
return new ServiceUnavailableError(provider, statusCode, message);
}
if (isModelNotFoundMessage(message)) {
return new ModelNotFoundError(model, provider, 404);
}
if (isInsufficientCreditsMessage(message)) {
return new InsufficientCreditsError(provider, message);
}
const lowerMessage = message.toLowerCase();
if (lowerMessage.includes("rate limit") || lowerMessage.includes("rate_limit") || lowerMessage.includes("too many requests")) {
return new RateLimitError3(provider, void 0, message);
}
if (lowerMessage.includes("unauthorized") || lowerMessage.includes("api key") || lowerMessage.includes("apikey") || lowerMessage.includes("authentication") || lowerMessage.includes("invalid_api_key")) {
return new AuthenticationError3(provider, message);
}
return error instanceof Error ? error : new Error(message);
}

// src/utils/removeContentTags.ts
function removeContentTags(content, tag) {
if (!content || typeof content !== "string") {
@@ -78320,25 +78464,7 @@ var AnthropicEngine = class {
let content = message;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (err.message?.toLowerCase().includes("model") && (err.message?.toLowerCase().includes("not found") || err.message?.toLowerCase().includes("does not exist") || err.message?.toLowerCase().includes("invalid"))) {
throw new ModelNotFoundError(this.config.model, "anthropic", 404);
}
if ("status" in error && error.status === 404) {
throw new ModelNotFoundError(this.config.model, "anthropic", 404);
}
ce(`${source_default.red("\u2716")} ${err?.message || err}`);
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const anthropicAiError = error.response.data.error;
if (anthropicAiError?.message) ce(anthropicAiError.message);
ce(
"For help look into README https://github.com/di-sukharev/opencommit#setup"
);
}
if (axios_default.isAxiosError(error) && error.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "anthropic", 404);
}
throw err;
throw normalizeEngineError(error, "anthropic", this.config.model);
}
};
this.config = config6;
@@ -82154,7 +82280,6 @@ var OpenAIClient = class {
};

// src/engine/azure.ts
init_dist2();
var AzureEngine = class {
constructor(config6) {
this.generateCommitMessage = async (messages) => {
@@ -82174,17 +82299,7 @@ var AzureEngine = class {
let content = message?.content;
return removeContentTags(content, "think");
} catch (error) {
ce(`${source_default.red("\u2716")} ${this.config.model}`);
const err = error;
ce(`${source_default.red("\u2716")} ${JSON.stringify(error)}`);
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const openAiError = error.response.data.error;
if (openAiError?.message) ce(openAiError.message);
ce(
"For help look into README https://github.com/di-sukharev/opencommit#setup"
);
}
throw err;
throw normalizeEngineError(error, "azure", this.config.model);
}
};
this.config = config6;
@@ -82218,9 +82333,8 @@ var FlowiseEngine = class {
const message = response.data;
let content = message?.text;
return removeContentTags(content, "think");
} catch (err) {
const message = err.response?.data?.error ?? err.message;
throw new Error("local model issues. details: " + message);
} catch (error) {
throw normalizeEngineError(error, "flowise", this.config.model);
}
}
};
@@ -83080,18 +83194,7 @@ var GeminiEngine = class {
const content = result.response.text();
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (err.message?.toLowerCase().includes("model") && (err.message?.toLowerCase().includes("not found") || err.message?.toLowerCase().includes("does not exist") || err.message?.toLowerCase().includes("invalid"))) {
throw new ModelNotFoundError(this.config.model, "gemini", 404);
}
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const geminiError = error.response.data.error;
if (geminiError) throw new Error(geminiError?.message);
}
if (axios_default.isAxiosError(error) && error.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "gemini", 404);
}
throw err;
throw normalizeEngineError(error, "gemini", this.config.model);
}
}
};
@@ -83124,15 +83227,8 @@ var OllamaEngine = class {
const { message } = response.data;
let content = message?.content;
return removeContentTags(content, "think");
} catch (err) {
const message = err.response?.data?.error ?? err.message;
if (message?.toLowerCase().includes("model") && (message?.toLowerCase().includes("not found") || message?.toLowerCase().includes("does not exist") || message?.toLowerCase().includes("pull"))) {
throw new ModelNotFoundError(this.config.model, "ollama", 404);
}
if (err.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "ollama", 404);
}
throw new Error(`Ollama provider error: ${message}`);
} catch (error) {
throw normalizeEngineError(error, "ollama", this.config.model);
}
}
};
@@ -83144,7 +83240,7 @@ __export(error_exports2, {
APIConnectionTimeoutError: () => APIConnectionTimeoutError3,
APIError: () => APIError3,
APIUserAbortError: () => APIUserAbortError3,
AuthenticationError: () => AuthenticationError3,
AuthenticationError: () => AuthenticationError4,
BadRequestError: () => BadRequestError3,
ConflictError: () => ConflictError3,
ContentFilterFinishReasonError: () => ContentFilterFinishReasonError,
@@ -83153,7 +83249,7 @@ __export(error_exports2, {
NotFoundError: () => NotFoundError3,
OpenAIError: () => OpenAIError,
PermissionDeniedError: () => PermissionDeniedError3,
RateLimitError: () => RateLimitError3,
RateLimitError: () => RateLimitError4,
UnprocessableEntityError: () => UnprocessableEntityError3
});

@@ -84425,7 +84521,7 @@ var APIError3 = class _APIError extends OpenAIError {
return new BadRequestError3(status, error, message, headers);
}
if (status === 401) {
return new AuthenticationError3(status, error, message, headers);
return new AuthenticationError4(status, error, message, headers);
}
if (status === 403) {
return new PermissionDeniedError3(status, error, message, headers);
@@ -84440,7 +84536,7 @@ var APIError3 = class _APIError extends OpenAIError {
return new UnprocessableEntityError3(status, error, message, headers);
}
if (status === 429) {
return new RateLimitError3(status, error, message, headers);
return new RateLimitError4(status, error, message, headers);
}
if (status >= 500) {
return new InternalServerError3(status, error, message, headers);
@@ -84473,7 +84569,7 @@ var BadRequestError3 = class extends APIError3 {
this.status = 400;
}
};
var AuthenticationError3 = class extends APIError3 {
var AuthenticationError4 = class extends APIError3 {
constructor() {
super(...arguments);
this.status = 401;
@@ -84503,7 +84599,7 @@ var UnprocessableEntityError3 = class extends APIError3 {
this.status = 422;
}
};
var RateLimitError3 = class extends APIError3 {
var RateLimitError4 = class extends APIError3 {
constructor() {
super(...arguments);
this.status = 429;
@@ -87812,15 +87908,15 @@ OpenAI.APIConnectionTimeoutError = APIConnectionTimeoutError3;
OpenAI.APIUserAbortError = APIUserAbortError3;
OpenAI.NotFoundError = NotFoundError3;
OpenAI.ConflictError = ConflictError3;
OpenAI.RateLimitError = RateLimitError3;
OpenAI.RateLimitError = RateLimitError4;
OpenAI.BadRequestError = BadRequestError3;
OpenAI.AuthenticationError = AuthenticationError3;
OpenAI.AuthenticationError = AuthenticationError4;
OpenAI.InternalServerError = InternalServerError3;
OpenAI.PermissionDeniedError = PermissionDeniedError3;
OpenAI.UnprocessableEntityError = UnprocessableEntityError3;
OpenAI.toFile = toFile2;
OpenAI.fileFromPath = fileFromPath4;
var { OpenAIError: OpenAIError2, APIError: APIError4, APIConnectionError: APIConnectionError4, APIConnectionTimeoutError: APIConnectionTimeoutError4, APIUserAbortError: APIUserAbortError4, NotFoundError: NotFoundError4, ConflictError: ConflictError4, RateLimitError: RateLimitError4, BadRequestError: BadRequestError4, AuthenticationError: AuthenticationError4, InternalServerError: InternalServerError4, PermissionDeniedError: PermissionDeniedError4, UnprocessableEntityError: UnprocessableEntityError4 } = error_exports2;
var { OpenAIError: OpenAIError2, APIError: APIError4, APIConnectionError: APIConnectionError4, APIConnectionTimeoutError: APIConnectionTimeoutError4, APIUserAbortError: APIUserAbortError4, NotFoundError: NotFoundError4, ConflictError: ConflictError4, RateLimitError: RateLimitError5, BadRequestError: BadRequestError4, AuthenticationError: AuthenticationError5, InternalServerError: InternalServerError4, PermissionDeniedError: PermissionDeniedError4, UnprocessableEntityError: UnprocessableEntityError4 } = error_exports2;
(function(OpenAI2) {
OpenAI2.Page = Page;
OpenAI2.CursorPage = CursorPage;
@@ -87861,21 +87957,7 @@ var OpenAiEngine = class {
let content = message?.content;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (err.message?.toLowerCase().includes("model") && (err.message?.toLowerCase().includes("not found") || err.message?.toLowerCase().includes("does not exist") || err.message?.toLowerCase().includes("invalid"))) {
throw new ModelNotFoundError(this.config.model, "openai", 404);
}
if ("status" in error && error.status === 404) {
throw new ModelNotFoundError(this.config.model, "openai", 404);
}
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const openAiError = error.response.data.error;
if (openAiError) throw new Error(openAiError.message);
}
if (axios_default.isAxiosError(error) && error.response?.status === 404) {
throw new ModelNotFoundError(this.config.model, "openai", 404);
}
throw err;
throw normalizeEngineError(error, "openai", this.config.model);
}
};
this.config = config6;
@@ -87919,12 +88001,7 @@ var MistralAiEngine = class {
let content = message.content;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const mistralError = error.response.data.error;
if (mistralError) throw new Error(mistralError.message);
}
throw err;
throw normalizeEngineError(error, "mistral", this.config.model);
}
};
this.config = config6;
@@ -87973,9 +88050,8 @@ var MLXEngine = class {
const message = choices[0].message;
let content = message?.content;
return removeContentTags(content, "think");
} catch (err) {
const message = err.response?.data?.error ?? err.message;
throw new Error(`MLX provider error: ${message}`);
} catch (error) {
throw normalizeEngineError(error, "mlx", this.config.model);
}
}
};
@@ -88005,12 +88081,7 @@ var DeepseekEngine = class extends OpenAiEngine {
let content = message?.content;
return removeContentTags(content, "think");
} catch (error) {
const err = error;
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
const openAiError = error.response.data.error;
if (openAiError) throw new Error(openAiError.message);
}
throw err;
throw normalizeEngineError(error, "deepseek", this.config.model);
}
};
}
@@ -88029,12 +88100,7 @@ var AimlApiEngine = class {
const message = response.data.choices?.[0]?.message;
return message?.content ?? null;
} catch (error) {
const err = error;
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
|
||||
const apiError = error.response.data.error;
|
||||
if (apiError) throw new Error(apiError.message);
|
||||
}
|
||||
throw err;
|
||||
throw normalizeEngineError(error, "aimlapi", this.config.model);
|
||||
}
|
||||
};
|
||||
this.client = axios_default.create({
|
||||
@@ -88064,12 +88130,7 @@ var OpenRouterEngine = class {
|
||||
let content = message?.content;
|
||||
return removeContentTags(content, "think");
|
||||
} catch (error) {
|
||||
const err = error;
|
||||
if (axios_default.isAxiosError(error) && error.response?.status === 401) {
|
||||
const openRouterError = error.response.data.error;
|
||||
if (openRouterError) throw new Error(openRouterError.message);
|
||||
}
|
||||
throw err;
|
||||
throw normalizeEngineError(error, "openrouter", this.config.model);
|
||||
}
|
||||
};
|
||||
this.client = axios_default.create({
|
||||
|
||||
4
package-lock.json
generated
@@ -1,12 +1,12 @@
 {
   "name": "opencommit",
-  "version": "3.2.11",
+  "version": "3.2.12",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "opencommit",
-      "version": "3.2.11",
+      "version": "3.2.12",
       "license": "MIT",
       "dependencies": {
         "@actions/core": "^1.10.0",

@@ -1,6 +1,6 @@
 {
   "name": "opencommit",
-  "version": "3.2.11",
+  "version": "3.2.12",
   "description": "Auto-generate impressive commits in 1 second. Killing lame commits with AI 🤯🔫",
   "keywords": [
     "git",

@@ -14,6 +14,7 @@ import {
   runSetup,
   promptForMissingApiKey
 } from './commands/setup';
+import { modelsCommand } from './commands/models';
 import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
 import { runMigrations } from './migrations/_run.js';

@@ -23,7 +24,7 @@ cli(
   {
     version: packageJSON.version,
     name: 'opencommit',
-    commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand],
+    commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
     flags: {
       fgm: {
         type: Boolean,

@@ -2,5 +2,6 @@ export enum COMMANDS {
   config = 'config',
   hook = 'hook',
   commitlint = 'commitlint',
-  setup = 'setup'
+  setup = 'setup',
+  models = 'models'
 }

@@ -11,6 +11,10 @@ import {
 import chalk from 'chalk';
 import { execa } from 'execa';
 import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
+import {
+  formatUserFriendlyError,
+  printFormattedError
+} from '../utils/errors';
 import {
   assertGitRepo,
   getChangedFiles,

@@ -211,10 +215,11 @@ ${chalk.grey('——————————————————')}`
       `${chalk.red('✖')} Failed to generate the commit message`
     );

-    console.log(error);
+    const errorConfig = getConfig();
+    const provider = errorConfig.OCO_AI_PROVIDER || 'openai';
+    const formatted = formatUserFriendlyError(error, provider);
+    outro(printFormattedError(formatted));

-    const err = error as Error;
-    outro(`${chalk.red('✖')} ${err?.message || err}`);
     process.exit(1);
   }
 };
144
src/commands/models.ts
Normal file
@@ -0,0 +1,144 @@
import { intro, outro, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
  MODEL_LIST,
  OCO_AI_PROVIDER_ENUM,
  getConfig
} from './config';
import {
  fetchModelsForProvider,
  clearModelCache,
  getCacheInfo,
  getCachedModels
} from '../utils/modelCache';

function formatCacheAge(timestamp: number | null): string {
  if (!timestamp) return 'never';
  const ageMs = Date.now() - timestamp;
  const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
  const hours = Math.floor(ageMs / (1000 * 60 * 60));
  const minutes = Math.floor(ageMs / (1000 * 60));

  if (days > 0) {
    return `${days} day${days === 1 ? '' : 's'} ago`;
  } else if (hours > 0) {
    return `${hours} hour${hours === 1 ? '' : 's'} ago`;
  } else if (minutes > 0) {
    return `${minutes} minute${minutes === 1 ? '' : 's'} ago`;
  }
  return 'just now';
}

async function listModels(provider: string, useCache: boolean = true): Promise<void> {
  const config = getConfig();
  const apiKey = config.OCO_API_KEY;
  const currentModel = config.OCO_MODEL;

  // Get cached models or fetch new ones
  let models: string[] = [];

  if (useCache) {
    const cached = getCachedModels(provider);
    if (cached) {
      models = cached;
    }
  }

  if (models.length === 0) {
    // Fallback to hardcoded list
    const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
    models = MODEL_LIST[providerKey] || [];
  }

  console.log(`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`);

  if (models.length === 0) {
    console.log(chalk.dim(' No models found'));
  } else {
    models.forEach((model) => {
      const isCurrent = model === currentModel;
      const prefix = isCurrent ? chalk.green('* ') : ' ';
      const label = isCurrent ? chalk.green(model) : model;
      console.log(`${prefix}${label}`);
    });
  }

  console.log('');
}

async function refreshModels(provider: string): Promise<void> {
  const config = getConfig();
  const apiKey = config.OCO_API_KEY;

  const loadingSpinner = spinner();
  loadingSpinner.start(`Fetching models from ${provider}...`);

  // Clear cache first
  clearModelCache();

  try {
    const models = await fetchModelsForProvider(provider, apiKey, undefined, true);
    loadingSpinner.stop(`${chalk.green('+')} Fetched ${models.length} models`);

    // List the models
    await listModels(provider, true);
  } catch (error) {
    loadingSpinner.stop(chalk.red('Failed to fetch models'));
    console.error(chalk.red(`Error: ${error instanceof Error ? error.message : 'Unknown error'}`));
  }
}

export const modelsCommand = command(
  {
    name: COMMANDS.models,
    help: {
      description: 'List and manage cached models for your AI provider'
    },
    flags: {
      refresh: {
        type: Boolean,
        alias: 'r',
        description: 'Clear cache and re-fetch models from the provider',
        default: false
      },
      provider: {
        type: String,
        alias: 'p',
        description: 'Specify provider (defaults to current OCO_AI_PROVIDER)'
      }
    }
  },
  async ({ flags }) => {
    const config = getConfig();
    const provider = flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;

    intro(chalk.bgCyan(' OpenCommit Models '));

    // Show cache info
    const cacheInfo = getCacheInfo();
    if (cacheInfo.timestamp) {
      console.log(
        chalk.dim(` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`)
      );
      if (cacheInfo.providers.length > 0) {
        console.log(
          chalk.dim(` Cached providers: ${cacheInfo.providers.join(', ')}`)
        );
      }
    } else {
      console.log(chalk.dim(' No cached models'));
    }

    if (flags.refresh) {
      await refreshModels(provider);
    } else {
      await listModels(provider);
    }

    outro(
      `Run ${chalk.cyan('oco models --refresh')} to update the model list`
    );
  }
);
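For a concrete sense of the bucketing in `formatCacheAge` above, a small usage sketch. The function is module-private in the new file, so it is declared rather than imported here; the expected outputs follow from the implementation as written.

```ts
// Illustrative only: formatCacheAge is module-private above, so we declare it
// here instead of importing it. Outputs follow from the implementation above.
declare function formatCacheAge(timestamp: number | null): string;

const MINUTE = 60 * 1000;
const HOUR = 60 * MINUTE;
const DAY = 24 * HOUR;

formatCacheAge(null);                     // "never"
formatCacheAge(Date.now() - 3 * DAY);     // "3 days ago"
formatCacheAge(Date.now() - 90 * MINUTE); // "1 hour ago" (whole hours, floored)
formatCacheAge(Date.now() - 5 * MINUTE);  // "5 minutes ago"
formatCacheAge(Date.now() - 10 * 1000);   // "just now"
```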
@@ -16,7 +16,8 @@ import {
 } from './config';
 import {
   fetchModelsForProvider,
-  fetchOllamaModels
+  fetchOllamaModels,
+  getCacheInfo
 } from '../utils/modelCache';

 const PROVIDER_DISPLAY_NAMES: Record<string, string> = {

@@ -108,24 +109,53 @@ async function getApiKey(provider: string): Promise<string | symbol> {
   });
 }

+function formatCacheAge(timestamp: number | null): string {
+  if (!timestamp) return '';
+  const ageMs = Date.now() - timestamp;
+  const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
+  const hours = Math.floor(ageMs / (1000 * 60 * 60));
+
+  if (days > 0) {
+    return `${days} day${days === 1 ? '' : 's'} ago`;
+  } else if (hours > 0) {
+    return `${hours} hour${hours === 1 ? '' : 's'} ago`;
+  }
+  return 'just now';
+}
+
 async function selectModel(
   provider: string,
   apiKey?: string
 ): Promise<string | symbol> {
+  const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
   const loadingSpinner = spinner();
-  loadingSpinner.start('Fetching available models...');
+  loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);

   let models: string[] = [];
+  let usedFallback = false;

   try {
     models = await fetchModelsForProvider(provider, apiKey);
   } catch {
     // Fall back to hardcoded list
+    usedFallback = true;
     const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
     models = MODEL_LIST[providerKey] || [];
   }

-  loadingSpinner.stop('Models loaded');
+  // Check cache info for display
+  const cacheInfo = getCacheInfo();
+  const cacheAge = formatCacheAge(cacheInfo.timestamp);
+
+  if (usedFallback) {
+    loadingSpinner.stop(
+      chalk.yellow('Could not fetch models from API. Using default list.')
+    );
+  } else if (cacheAge) {
+    loadingSpinner.stop(`Models loaded ${chalk.dim(`(cached ${cacheAge})`)}`);
+  } else {
+    loadingSpinner.stop('Models loaded');
+  }

   if (models.length === 0) {
     // For Ollama/MLX, prompt for manual entry
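The three spinner outcomes in `selectModel` depend on two independent signals, in branch order. A compact behavior sketch (inferred from the hunk above, not additional source):

```ts
// Outcome of the spinner message in selectModel, by branch order above:
//   usedFallback === true                 -> yellow "Could not fetch models from API. Using default list."
//   else cacheAge !== '' (cache present)  -> "Models loaded (cached N hours ago)"
//   else                                  -> "Models loaded"
// Note: this copy of formatCacheAge returns '' for a null timestamp, unlike the
// models.ts copy, which returns 'never'; that empty string is what drives the
// second branch.
```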
@@ -1,5 +1,6 @@
 import OpenAI from 'openai';
 import axios, { AxiosInstance } from 'axios';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { AiEngine, AiEngineConfig } from './Engine';

 interface AimlApiConfig extends AiEngineConfig {}

@@ -32,16 +33,7 @@ export class AimlApiEngine implements AiEngine {
       const message = response.data.choices?.[0]?.message;
       return message?.content ?? null;
     } catch (error) {
-      const err = error as Error;
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const apiError = error.response.data.error;
-        if (apiError) throw new Error(apiError.message);
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'aimlapi', this.config.model);
     }
   };
 }

@@ -3,12 +3,9 @@ import {
   MessageCreateParamsNonStreaming,
   MessageParam
 } from '@anthropic-ai/sdk/resources/messages.mjs';
-import { outro } from '@clack/prompts';
-import axios from 'axios';
-import chalk from 'chalk';
 import { OpenAI } from 'openai';
 import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
-import { ModelNotFoundError } from '../utils/errors';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -59,41 +56,7 @@ export class AnthropicEngine implements AiEngine {
       let content = message;
       return removeContentTags(content, 'think');
     } catch (error) {
-      const err = error as Error;
-
-      // Check for model not found errors
-      if (err.message?.toLowerCase().includes('model') &&
-          (err.message?.toLowerCase().includes('not found') ||
-           err.message?.toLowerCase().includes('does not exist') ||
-           err.message?.toLowerCase().includes('invalid'))) {
-        throw new ModelNotFoundError(this.config.model, 'anthropic', 404);
-      }
-
-      // Check for 404 errors
-      if ('status' in (error as any) && (error as any).status === 404) {
-        throw new ModelNotFoundError(this.config.model, 'anthropic', 404);
-      }
-
-      outro(`${chalk.red('✖')} ${err?.message || err}`);
-
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const anthropicAiError = error.response.data.error;
-
-        if (anthropicAiError?.message) outro(anthropicAiError.message);
-        outro(
-          'For help look into README https://github.com/di-sukharev/opencommit#setup'
-        );
-      }
-
-      // Check axios 404 errors
-      if (axios.isAxiosError(error) && error.response?.status === 404) {
-        throw new ModelNotFoundError(this.config.model, 'anthropic', 404);
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'anthropic', this.config.model);
     }
   };
 }

@@ -2,11 +2,9 @@ import {
   AzureKeyCredential,
   OpenAIClient as AzureOpenAIClient
 } from '@azure/openai';
-import { outro } from '@clack/prompts';
-import axios from 'axios';
-import chalk from 'chalk';
 import { OpenAI } from 'openai';
 import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -57,24 +55,7 @@ export class AzureEngine implements AiEngine {
       let content = message?.content;
       return removeContentTags(content, 'think');
     } catch (error) {
-      outro(`${chalk.red('✖')} ${this.config.model}`);
-
-      const err = error as Error;
-      outro(`${chalk.red('✖')} ${JSON.stringify(error)}`);
-
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const openAiError = error.response.data.error;
-
-        if (openAiError?.message) outro(openAiError.message);
-        outro(
-          'For help look into README https://github.com/di-sukharev/opencommit#setup'
-        );
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'azure', this.config.model);
     }
   };
 }

@@ -1,6 +1,6 @@
-import axios from 'axios';
 import { OpenAI } from 'openai';
 import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { OpenAiEngine, OpenAiConfig } from './openAi';

@@ -45,17 +45,7 @@ export class DeepseekEngine extends OpenAiEngine {
       let content = message?.content;
       return removeContentTags(content, 'think');
     } catch (error) {
-      const err = error as Error;
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const openAiError = error.response.data.error;
-
-        if (openAiError) throw new Error(openAiError.message);
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'deepseek', this.config.model);
     }
   };
 }

@@ -1,5 +1,6 @@
 import axios, { AxiosInstance } from 'axios';
 import { OpenAI } from 'openai';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -39,9 +40,8 @@ export class FlowiseEngine implements AiEngine {
       const message = response.data;
       let content = message?.text;
       return removeContentTags(content, 'think');
-    } catch (err: any) {
-      const message = err.response?.data?.error ?? err.message;
-      throw new Error('local model issues. details: ' + message);
+    } catch (error) {
+      throw normalizeEngineError(error, 'flowise', this.config.model);
     }
   }
 }

@@ -5,9 +5,8 @@ import {
   HarmCategory,
   Part
 } from '@google/generative-ai';
-import axios from 'axios';
 import { OpenAI } from 'openai';
-import { ModelNotFoundError } from '../utils/errors';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -76,30 +75,7 @@ export class GeminiEngine implements AiEngine {
       const content = result.response.text();
       return removeContentTags(content, 'think');
     } catch (error) {
-      const err = error as Error;
-
-      // Check for model not found errors
-      if (err.message?.toLowerCase().includes('model') &&
-          (err.message?.toLowerCase().includes('not found') ||
-           err.message?.toLowerCase().includes('does not exist') ||
-           err.message?.toLowerCase().includes('invalid'))) {
-        throw new ModelNotFoundError(this.config.model, 'gemini', 404);
-      }
-
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const geminiError = error.response.data.error;
-        if (geminiError) throw new Error(geminiError?.message);
-      }
-
-      // Check axios 404 errors
-      if (axios.isAxiosError(error) && error.response?.status === 404) {
-        throw new ModelNotFoundError(this.config.model, 'gemini', 404);
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'gemini', this.config.model);
     }
   }
 }

@@ -1,6 +1,6 @@
-import axios from 'axios';
 import { OpenAI } from 'openai';
 import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -63,17 +63,7 @@ export class MistralAiEngine implements AiEngine {
       let content = message.content as string;
       return removeContentTags(content, 'think');
     } catch (error) {
-      const err = error as Error;
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const mistralError = error.response.data.error;
-
-        if (mistralError) throw new Error(mistralError.message);
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'mistral', this.config.model);
     }
   };
 }

@@ -1,5 +1,6 @@
 import axios, { AxiosInstance } from 'axios';
 import { OpenAI } from 'openai';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -39,9 +40,8 @@ export class MLXEngine implements AiEngine {
       const message = choices[0].message;
       let content = message?.content;
       return removeContentTags(content, 'think');
-    } catch (err: any) {
-      const message = err.response?.data?.error ?? err.message;
-      throw new Error(`MLX provider error: ${message}`);
+    } catch (error) {
+      throw normalizeEngineError(error, 'mlx', this.config.model);
     }
   }
 }

@@ -1,6 +1,6 @@
 import axios, { AxiosInstance } from 'axios';
 import { OpenAI } from 'openai';
-import { ModelNotFoundError } from '../utils/errors';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -45,23 +45,8 @@ export class OllamaEngine implements AiEngine {
       const { message } = response.data;
       let content = message?.content;
       return removeContentTags(content, 'think');
-    } catch (err: any) {
-      const message = err.response?.data?.error ?? err.message;
-
-      // Check for model not found errors
-      if (message?.toLowerCase().includes('model') &&
-          (message?.toLowerCase().includes('not found') ||
-           message?.toLowerCase().includes('does not exist') ||
-           message?.toLowerCase().includes('pull'))) {
-        throw new ModelNotFoundError(this.config.model, 'ollama', 404);
-      }
-
-      // Check for 404 status
-      if (err.response?.status === 404) {
-        throw new ModelNotFoundError(this.config.model, 'ollama', 404);
-      }
-
-      throw new Error(`Ollama provider error: ${message}`);
+    } catch (error) {
+      throw normalizeEngineError(error, 'ollama', this.config.model);
     }
   }
 }

@@ -1,8 +1,7 @@
-import axios from 'axios';
 import { OpenAI } from 'openai';
 import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
 import { parseCustomHeaders } from '../utils/engine';
-import { ModelNotFoundError } from '../utils/errors';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';

@@ -62,36 +61,7 @@ export class OpenAiEngine implements AiEngine {
       let content = message?.content;
       return removeContentTags(content, 'think');
     } catch (error) {
-      const err = error as Error;
-
-      // Check for model not found errors
-      if (err.message?.toLowerCase().includes('model') &&
-          (err.message?.toLowerCase().includes('not found') ||
-           err.message?.toLowerCase().includes('does not exist') ||
-           err.message?.toLowerCase().includes('invalid'))) {
-        throw new ModelNotFoundError(this.config.model, 'openai', 404);
-      }
-
-      // Check for 404 errors from API
-      if ('status' in (error as any) && (error as any).status === 404) {
-        throw new ModelNotFoundError(this.config.model, 'openai', 404);
-      }
-
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const openAiError = error.response.data.error;
-
-        if (openAiError) throw new Error(openAiError.message);
-      }
-
-      // Check axios 404 errors
-      if (axios.isAxiosError(error) && error.response?.status === 404) {
-        throw new ModelNotFoundError(this.config.model, 'openai', 404);
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'openai', this.config.model);
     }
   };
 }

@@ -1,7 +1,8 @@
 import OpenAI from 'openai';
-import { AiEngine, AiEngineConfig } from './Engine';
 import axios, { AxiosInstance } from 'axios';
+import { normalizeEngineError } from '../utils/engineErrorHandler';
 import { removeContentTags } from '../utils/removeContentTags';
+import { AiEngine, AiEngineConfig } from './Engine';

 interface OpenRouterConfig extends AiEngineConfig {}

@@ -33,17 +34,7 @@ export class OpenRouterEngine implements AiEngine {
       let content = message?.content;
       return removeContentTags(content, 'think');
     } catch (error) {
-      const err = error as Error;
-      if (
-        axios.isAxiosError<{ error?: { message: string } }>(error) &&
-        error.response?.status === 401
-      ) {
-        const openRouterError = error.response.data.error;
-
-        if (openRouterError) throw new Error(openRouterError.message);
-      }
-
-      throw err;
+      throw normalizeEngineError(error, 'openrouter', this.config.model);
     }
   };
 }
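Every engine above now funnels its catch block through the same helper. A minimal sketch of the shared pattern, assuming an engine that knows its provider string and configured model; `callProviderApi` and the model name below are illustrative, not from the diff:

```ts
import { normalizeEngineError } from '../utils/engineErrorHandler';

// Illustrative engine method: callProviderApi stands in for the real SDK call.
async function generateCommitMessage(
  callProviderApi: () => Promise<string>
): Promise<string> {
  try {
    return await callProviderApi();
  } catch (error) {
    // One line replaces the per-engine status/message sniffing: the helper
    // returns a typed error (AuthenticationError, RateLimitError, ...) or
    // the original Error, and the engine simply rethrows it.
    throw normalizeEngineError(error, 'openrouter', 'anthropic/claude-3.5-sonnet');
  }
}
```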
205
src/utils/engineErrorHandler.ts
Normal file
@@ -0,0 +1,205 @@
import axios from 'axios';
import {
  AuthenticationError,
  InsufficientCreditsError,
  ModelNotFoundError,
  RateLimitError,
  ServiceUnavailableError
} from './errors';

/**
 * Extracts HTTP status code from various error types
 */
function getStatusCode(error: unknown): number | null {
  // Direct status property (common in API SDKs)
  if (typeof (error as any)?.status === 'number') {
    return (error as any).status;
  }

  // Axios-style errors
  if (axios.isAxiosError(error)) {
    return error.response?.status ?? null;
  }

  // Response object with status
  if (typeof (error as any)?.response?.status === 'number') {
    return (error as any).response.status;
  }

  return null;
}

/**
 * Extracts retry-after value from error headers (for rate limiting)
 */
function getRetryAfter(error: unknown): number | undefined {
  const headers = (error as any)?.response?.headers;
  if (headers) {
    const retryAfter = headers['retry-after'] || headers['Retry-After'];
    if (retryAfter) {
      const seconds = parseInt(retryAfter, 10);
      if (!isNaN(seconds)) {
        return seconds;
      }
    }
  }
  return undefined;
}

/**
 * Extracts the error message from various error structures
 */
function extractErrorMessage(error: unknown): string {
  if (error instanceof Error) {
    return error.message;
  }

  // API error response structures
  const apiError = (error as any)?.response?.data?.error;
  if (apiError) {
    if (typeof apiError === 'string') {
      return apiError;
    }
    if (apiError.message) {
      return apiError.message;
    }
  }

  // Direct error data
  const errorData = (error as any)?.error;
  if (errorData) {
    if (typeof errorData === 'string') {
      return errorData;
    }
    if (errorData.message) {
      return errorData.message;
    }
  }

  // Fallback
  if (typeof error === 'string') {
    return error;
  }

  return 'An unknown error occurred';
}

/**
 * Checks if the error message indicates a model not found error
 */
function isModelNotFoundMessage(message: string): boolean {
  const lowerMessage = message.toLowerCase();
  return (
    (lowerMessage.includes('model') &&
      (lowerMessage.includes('not found') ||
        lowerMessage.includes('does not exist') ||
        lowerMessage.includes('invalid') ||
        lowerMessage.includes('pull'))) ||
    lowerMessage.includes('does_not_exist')
  );
}

/**
 * Checks if the error message indicates insufficient credits
 */
function isInsufficientCreditsMessage(message: string): boolean {
  const lowerMessage = message.toLowerCase();
  return (
    lowerMessage.includes('insufficient') ||
    lowerMessage.includes('credit') ||
    lowerMessage.includes('quota') ||
    lowerMessage.includes('balance too low') ||
    lowerMessage.includes('billing') ||
    lowerMessage.includes('payment required') ||
    lowerMessage.includes('exceeded')
  );
}

/**
 * Normalizes raw API errors into typed error classes.
 * This provides consistent error handling across all engine implementations.
 *
 * @param error - The raw error from the API call
 * @param provider - The AI provider name (e.g., 'openai', 'anthropic')
 * @param model - The model being used
 * @returns A typed Error instance
 */
export function normalizeEngineError(
  error: unknown,
  provider: string,
  model: string
): Error {
  // If it's already one of our custom errors, return as-is
  if (
    error instanceof ModelNotFoundError ||
    error instanceof AuthenticationError ||
    error instanceof InsufficientCreditsError ||
    error instanceof RateLimitError ||
    error instanceof ServiceUnavailableError
  ) {
    return error;
  }

  const statusCode = getStatusCode(error);
  const message = extractErrorMessage(error);

  // Handle based on HTTP status codes
  switch (statusCode) {
    case 401:
      return new AuthenticationError(provider, message);

    case 402:
      return new InsufficientCreditsError(provider, message);

    case 404:
      // Could be model not found or endpoint not found
      if (isModelNotFoundMessage(message)) {
        return new ModelNotFoundError(model, provider, 404);
      }
      // Return generic error for other 404s
      return error instanceof Error ? error : new Error(message);

    case 429:
      const retryAfter = getRetryAfter(error);
      return new RateLimitError(provider, retryAfter, message);

    case 500:
    case 502:
    case 503:
    case 504:
      return new ServiceUnavailableError(provider, statusCode, message);
  }

  // Handle based on error message content
  if (isModelNotFoundMessage(message)) {
    return new ModelNotFoundError(model, provider, 404);
  }

  if (isInsufficientCreditsMessage(message)) {
    return new InsufficientCreditsError(provider, message);
  }

  // Check for rate limit patterns in message
  const lowerMessage = message.toLowerCase();
  if (
    lowerMessage.includes('rate limit') ||
    lowerMessage.includes('rate_limit') ||
    lowerMessage.includes('too many requests')
  ) {
    return new RateLimitError(provider, undefined, message);
  }

  // Check for auth patterns in message
  if (
    lowerMessage.includes('unauthorized') ||
    lowerMessage.includes('api key') ||
    lowerMessage.includes('apikey') ||
    lowerMessage.includes('authentication') ||
    lowerMessage.includes('invalid_api_key')
  ) {
    return new AuthenticationError(provider, message);
  }

  // Return original error or wrap in Error if needed
  return error instanceof Error ? error : new Error(message);
}
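To make the mapping concrete, a behavior sketch for `normalizeEngineError`. All inputs below are hypothetical; the import path is the one the engines use, and the expected results follow from the implementation above:

```ts
import { normalizeEngineError } from '../utils/engineErrorHandler';

// A 401 carried on an Error's `status` property maps to AuthenticationError
// and keeps the original message.
const e401 = Object.assign(new Error('Incorrect API key provided'), { status: 401 });
normalizeEngineError(e401, 'openai', 'gpt-4o');
// -> AuthenticationError: "Incorrect API key provided"

// A 429 response with a retry-after header maps to RateLimitError with
// retryAfter parsed as seconds; the message falls back to the generic one
// because this bare object carries no extractable message.
const e429 = { response: { status: 429, headers: { 'retry-after': '12' } } };
normalizeEngineError(e429, 'groq', 'llama-3.1-8b-instant');
// -> RateLimitError with retryAfter === 12

// With no status code at all, message patterns still classify the error.
normalizeEngineError(
  new Error('model "mistral" not found, try pulling it first'),
  'ollama',
  'mistral'
);
// -> ModelNotFoundError for model "mistral" on provider "ollama"
```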
@@ -1,5 +1,71 @@
 import chalk from 'chalk';
 import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';

+// Provider billing/help URLs for common errors
+export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
+  [OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/billing',
+  [OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/settings/organization/billing',
+  [OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/plan',
+  [OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/settings/billing',
+  [OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/billing/',
+  [OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/usage',
+  [OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/credits',
+  [OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/billing',
+  [OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
+  [OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
+  [OCO_AI_PROVIDER_ENUM.MLX]: null,
+  [OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
+  [OCO_AI_PROVIDER_ENUM.TEST]: null
+};
+
+// Error type for insufficient credits/quota
+export class InsufficientCreditsError extends Error {
+  public readonly provider: string;
+
+  constructor(provider: string, message?: string) {
+    super(message || `Insufficient credits or quota for provider '${provider}'`);
+    this.name = 'InsufficientCreditsError';
+    this.provider = provider;
+  }
+}
+
+// Error type for rate limiting (429 errors)
+export class RateLimitError extends Error {
+  public readonly provider: string;
+  public readonly retryAfter?: number;
+
+  constructor(provider: string, retryAfter?: number, message?: string) {
+    super(message || `Rate limit exceeded for provider '${provider}'`);
+    this.name = 'RateLimitError';
+    this.provider = provider;
+    this.retryAfter = retryAfter;
+  }
+}
+
+// Error type for service unavailable (5xx errors)
+export class ServiceUnavailableError extends Error {
+  public readonly provider: string;
+  public readonly statusCode: number;
+
+  constructor(provider: string, statusCode: number = 503, message?: string) {
+    super(message || `Service unavailable for provider '${provider}'`);
+    this.name = 'ServiceUnavailableError';
+    this.provider = provider;
+    this.statusCode = statusCode;
+  }
+}
+
+// Error type for authentication failures
+export class AuthenticationError extends Error {
+  public readonly provider: string;
+
+  constructor(provider: string, message?: string) {
+    super(message || `Authentication failed for provider '${provider}'`);
+    this.name = 'AuthenticationError';
+    this.provider = provider;
+  }
+}
+
 export class ModelNotFoundError extends Error {
   public readonly modelName: string;
   public readonly provider: string;

@@ -164,3 +230,242 @@ export function formatErrorWithRecovery(

   return message;
 }
+
+// Detect insufficient credits/quota errors from various providers
+export function isInsufficientCreditsError(error: unknown): boolean {
+  if (error instanceof InsufficientCreditsError) {
+    return true;
+  }
+
+  if (error instanceof Error) {
+    const message = error.message.toLowerCase();
+
+    // Common patterns for insufficient credits/quota
+    if (
+      message.includes('insufficient') ||
+      message.includes('credit') ||
+      message.includes('quota') ||
+      message.includes('balance') ||
+      message.includes('billing') ||
+      message.includes('payment') ||
+      message.includes('exceeded') ||
+      message.includes('limit reached') ||
+      message.includes('no remaining')
+    ) {
+      return true;
+    }
+
+    // Check for 402 Payment Required status
+    if ('status' in (error as any) && (error as any).status === 402) {
+      return true;
+    }
+
+    if ('response' in (error as any)) {
+      const response = (error as any).response;
+      if (response?.status === 402) {
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+// Detect rate limit errors (429)
+export function isRateLimitError(error: unknown): boolean {
+  if (error instanceof RateLimitError) {
+    return true;
+  }
+
+  if (error instanceof Error) {
+    const message = error.message.toLowerCase();
+
+    // Common patterns for rate limiting
+    if (
+      message.includes('rate limit') ||
+      message.includes('rate_limit') ||
+      message.includes('too many requests') ||
+      message.includes('throttle')
+    ) {
+      return true;
+    }
+
+    // Check for 429 status
+    if ('status' in (error as any) && (error as any).status === 429) {
+      return true;
+    }
+
+    if ('response' in (error as any)) {
+      const response = (error as any).response;
+      if (response?.status === 429) {
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+// Detect service unavailable errors (5xx)
+export function isServiceUnavailableError(error: unknown): boolean {
+  if (error instanceof ServiceUnavailableError) {
+    return true;
+  }
+
+  if (error instanceof Error) {
+    const message = error.message.toLowerCase();
+
+    // Common patterns for service unavailable
+    if (
+      message.includes('service unavailable') ||
+      message.includes('server error') ||
+      message.includes('internal error') ||
+      message.includes('temporarily unavailable') ||
+      message.includes('overloaded')
+    ) {
+      return true;
+    }
+
+    // Check for 5xx status
+    const status = (error as any).status || (error as any).response?.status;
+    if (status && status >= 500 && status < 600) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// User-friendly formatted error structure
+export interface FormattedError {
+  title: string;
+  message: string;
+  helpUrl: string | null;
+  suggestion: string | null;
+}
+
+// Format an error into a user-friendly structure
+export function formatUserFriendlyError(error: unknown, provider: string): FormattedError {
+  const billingUrl = PROVIDER_BILLING_URLS[provider] || null;
+
+  // Handle our custom error types first
+  if (error instanceof InsufficientCreditsError) {
+    return {
+      title: 'Insufficient Credits',
+      message: `Your ${provider} account has insufficient credits or quota.`,
+      helpUrl: billingUrl,
+      suggestion: 'Add credits to your account to continue using the service.'
+    };
+  }
+
+  if (error instanceof RateLimitError) {
+    const retryMsg = error.retryAfter
+      ? `Please wait ${error.retryAfter} seconds before retrying.`
+      : 'Please wait a moment before retrying.';
+    return {
+      title: 'Rate Limit Exceeded',
+      message: `You've made too many requests to ${provider}.`,
+      helpUrl: billingUrl,
+      suggestion: retryMsg
+    };
+  }
+
+  if (error instanceof ServiceUnavailableError) {
+    return {
+      title: 'Service Unavailable',
+      message: `The ${provider} service is temporarily unavailable.`,
+      helpUrl: null,
+      suggestion: 'Please try again in a few moments.'
+    };
+  }
+
+  if (error instanceof AuthenticationError) {
+    return {
+      title: 'Authentication Failed',
+      message: `Your ${provider} API key is invalid or expired.`,
+      helpUrl: billingUrl,
+      suggestion: 'Run `oco setup` to configure a valid API key.'
+    };
+  }
+
+  if (error instanceof ModelNotFoundError) {
+    return {
+      title: 'Model Not Found',
+      message: `The model '${error.modelName}' is not available for ${provider}.`,
+      helpUrl: null,
+      suggestion: 'Run `oco setup` to select a valid model.'
+    };
+  }
+
+  // Detect error type from raw errors
+  if (isInsufficientCreditsError(error)) {
+    return {
+      title: 'Insufficient Credits',
+      message: `Your ${provider} account has insufficient credits or quota.`,
+      helpUrl: billingUrl,
+      suggestion: 'Add credits to your account to continue using the service.'
+    };
+  }
+
+  if (isRateLimitError(error)) {
+    return {
+      title: 'Rate Limit Exceeded',
+      message: `You've made too many requests to ${provider}.`,
+      helpUrl: billingUrl,
+      suggestion: 'Please wait a moment before retrying.'
+    };
+  }
+
+  if (isServiceUnavailableError(error)) {
+    return {
+      title: 'Service Unavailable',
+      message: `The ${provider} service is temporarily unavailable.`,
+      helpUrl: null,
+      suggestion: 'Please try again in a few moments.'
+    };
+  }
+
+  if (isApiKeyError(error)) {
+    return {
+      title: 'Authentication Failed',
+      message: `Your ${provider} API key is invalid or expired.`,
+      helpUrl: billingUrl,
+      suggestion: 'Run `oco setup` to configure a valid API key.'
+    };
+  }
+
+  if (isModelNotFoundError(error)) {
+    const model = (error as any).modelName || (error as any).model || 'unknown';
+    return {
+      title: 'Model Not Found',
+      message: `The model '${model}' is not available for ${provider}.`,
+      helpUrl: null,
+      suggestion: 'Run `oco setup` to select a valid model.'
+    };
+  }
+
+  // Default: generic error
+  const errorMessage = error instanceof Error ? error.message : String(error);
+  return {
+    title: 'Error',
+    message: errorMessage,
+    helpUrl: null,
+    suggestion: 'Run `oco setup` to reconfigure or check your settings.'
+  };
+}
+
+// Print a formatted error as a chalk-styled string
+export function printFormattedError(formatted: FormattedError): string {
+  let output = `\n${chalk.red('✖')} ${chalk.bold.red(formatted.title)}\n`;
+  output += ` ${formatted.message}\n`;

+  if (formatted.helpUrl) {
+    output += `\n ${chalk.cyan('Help:')} ${chalk.underline(formatted.helpUrl)}\n`;
+  }
+
+  if (formatted.suggestion) {
+    output += `\n ${chalk.yellow('Suggestion:')} ${formatted.suggestion}\n`;
+  }
+
+  return output;
+}
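A small sketch of how these pieces compose at the call site, mirroring the commit.ts hunk earlier; the relative import path is assumed to match how src/commands modules import it:

```ts
import {
  RateLimitError,
  formatUserFriendlyError,
  printFormattedError
} from '../utils/errors'; // path as imported from src/commands in the diffs above

// Suppose a provider call was normalized into a RateLimitError with retryAfter.
const error = new RateLimitError('openai', 30);
const formatted = formatUserFriendlyError(error, 'openai');
console.log(printFormattedError(formatted));
// Prints roughly:
// ✖ Rate Limit Exceeded
//   You've made too many requests to openai.
//   Help: https://platform.openai.com/settings/organization/billing
//   Suggestion: Please wait 30 seconds before retrying.
```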
@@ -87,15 +87,137 @@ export async function fetchOllamaModels(
   }
 }

+export async function fetchAnthropicModels(apiKey: string): Promise<string[]> {
+  try {
+    const response = await fetch('https://api.anthropic.com/v1/models', {
+      headers: {
+        'x-api-key': apiKey,
+        'anthropic-version': '2023-06-01'
+      }
+    });
+
+    if (!response.ok) {
+      return MODEL_LIST.anthropic;
+    }
+
+    const data = await response.json();
+    const models = data.data
+      ?.map((m: { id: string }) => m.id)
+      .filter((id: string) => id.startsWith('claude-'))
+      .sort();
+
+    return models && models.length > 0 ? models : MODEL_LIST.anthropic;
+  } catch {
+    return MODEL_LIST.anthropic;
+  }
+}
+
+export async function fetchMistralModels(apiKey: string): Promise<string[]> {
+  try {
+    const response = await fetch('https://api.mistral.ai/v1/models', {
+      headers: {
+        Authorization: `Bearer ${apiKey}`
+      }
+    });
+
+    if (!response.ok) {
+      return MODEL_LIST.mistral;
+    }
+
+    const data = await response.json();
+    const models = data.data
+      ?.map((m: { id: string }) => m.id)
+      .sort();
+
+    return models && models.length > 0 ? models : MODEL_LIST.mistral;
+  } catch {
+    return MODEL_LIST.mistral;
+  }
+}
+
+export async function fetchGroqModels(apiKey: string): Promise<string[]> {
+  try {
+    const response = await fetch('https://api.groq.com/openai/v1/models', {
+      headers: {
+        Authorization: `Bearer ${apiKey}`
+      }
+    });
+
+    if (!response.ok) {
+      return MODEL_LIST.groq;
+    }
+
+    const data = await response.json();
+    const models = data.data
+      ?.map((m: { id: string }) => m.id)
+      .sort();
+
+    return models && models.length > 0 ? models : MODEL_LIST.groq;
+  } catch {
+    return MODEL_LIST.groq;
+  }
+}
+
+export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
+  try {
+    const response = await fetch('https://openrouter.ai/api/v1/models', {
+      headers: {
+        Authorization: `Bearer ${apiKey}`
+      }
+    });
+
+    if (!response.ok) {
+      return MODEL_LIST.openrouter;
+    }
+
+    const data = await response.json();
+    // Filter to text-capable models only (exclude image/audio models)
+    const models = data.data
+      ?.filter((m: { id: string; context_length?: number }) =>
+        m.context_length && m.context_length > 0
+      )
+      .map((m: { id: string }) => m.id)
+      .sort();
+
+    return models && models.length > 0 ? models : MODEL_LIST.openrouter;
+  } catch {
+    return MODEL_LIST.openrouter;
+  }
+}
+
+export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
+  try {
+    const response = await fetch('https://api.deepseek.com/v1/models', {
+      headers: {
+        Authorization: `Bearer ${apiKey}`
+      }
+    });
+
+    if (!response.ok) {
+      return MODEL_LIST.deepseek;
+    }
+
+    const data = await response.json();
+    const models = data.data
+      ?.map((m: { id: string }) => m.id)
+      .sort();
+
+    return models && models.length > 0 ? models : MODEL_LIST.deepseek;
+  } catch {
+    return MODEL_LIST.deepseek;
+  }
+}
+
 export async function fetchModelsForProvider(
   provider: string,
   apiKey?: string,
-  baseUrl?: string
+  baseUrl?: string,
+  forceRefresh: boolean = false
 ): Promise<string[]> {
   const cache = readCache();

-  // Return cached models if valid
-  if (isCacheValid(cache) && cache!.models[provider]) {
+  // Return cached models if valid (unless force refresh)
+  if (!forceRefresh && isCacheValid(cache) && cache!.models[provider]) {
     return cache!.models[provider];
   }

@@ -115,23 +237,40 @@ export async function fetchModelsForProvider(
       break;

     case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
-      models = MODEL_LIST.anthropic;
+      if (apiKey) {
+        models = await fetchAnthropicModels(apiKey);
+      } else {
+        models = MODEL_LIST.anthropic;
+      }
       break;

     case OCO_AI_PROVIDER_ENUM.GEMINI:
       // Google's API doesn't easily list generative models, use hardcoded list
       models = MODEL_LIST.gemini;
       break;

     case OCO_AI_PROVIDER_ENUM.GROQ:
-      models = MODEL_LIST.groq;
+      if (apiKey) {
+        models = await fetchGroqModels(apiKey);
+      } else {
+        models = MODEL_LIST.groq;
+      }
       break;

     case OCO_AI_PROVIDER_ENUM.MISTRAL:
-      models = MODEL_LIST.mistral;
+      if (apiKey) {
+        models = await fetchMistralModels(apiKey);
+      } else {
+        models = MODEL_LIST.mistral;
+      }
       break;

     case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
-      models = MODEL_LIST.deepseek;
+      if (apiKey) {
+        models = await fetchDeepSeekModels(apiKey);
+      } else {
+        models = MODEL_LIST.deepseek;
+      }
       break;

     case OCO_AI_PROVIDER_ENUM.AIMLAPI:

@@ -139,7 +278,11 @@ export async function fetchModelsForProvider(
       break;

     case OCO_AI_PROVIDER_ENUM.OPENROUTER:
-      models = MODEL_LIST.openrouter;
+      if (apiKey) {
+        models = await fetchOpenRouterModels(apiKey);
+      } else {
+        models = MODEL_LIST.openrouter;
+      }
       break;

     default:

@@ -168,3 +311,22 @@ export function clearModelCache(): void {
     // Silently fail
   }
 }
+
+export function getCacheInfo(): { timestamp: number | null; providers: string[] } {
+  const cache = readCache();
+  if (!cache) {
+    return { timestamp: null, providers: [] };
+  }
+  return {
+    timestamp: cache.timestamp,
+    providers: Object.keys(cache.models || {})
+  };
+}
+
+export function getCachedModels(provider: string): string[] | null {
+  const cache = readCache();
+  if (!cache || !cache.models[provider]) {
+    return null;
+  }
+  return cache.models[provider];
+}
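Finally, a usage sketch of the cache flow. The import path is assumed, and `demo` is an illustrative wrapper; the cache-validity check itself (`isCacheValid`, with the 7-day TTL) sits outside these hunks:

```ts
import {
  fetchModelsForProvider,
  clearModelCache,
  getCachedModels
} from '../utils/modelCache'; // relative path as used in the diffs above

async function demo(apiKey: string | undefined) {
  // Plain call: served from the cache while it is still valid (7-day TTL).
  const models = await fetchModelsForProvider('anthropic', apiKey);

  // `oco models --refresh` path (see refreshModels in src/commands/models.ts):
  // clear the cache, then bypass any remaining entry with forceRefresh = true.
  clearModelCache();
  const fresh = await fetchModelsForProvider(
    'anthropic',
    apiKey,
    undefined, // baseUrl: only some providers use a custom endpoint
    true       // forceRefresh: skip the cache-validity check entirely
  );

  // Read-only peek at the cached list for a provider (null when absent).
  const cached = getCachedModels('anthropic');
  return { models, fresh, cached };
}
```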