add oco models command

di-sukharev
2026-01-17 23:46:04 +03:00
parent 5b241ed2d0
commit 6ed70d0382
9 changed files with 654 additions and 26 deletions

View File

@@ -201,6 +201,28 @@ or for a cheaper option:
oco config set OCO_MODEL=gpt-3.5-turbo
```
### Model Management
OpenCommit automatically fetches available models from your provider when you run `oco setup`. Models are cached for 7 days to reduce API calls.
To see available models for your current provider:
```sh
oco models
```
To refresh the model list (e.g., after new models are released):
```sh
oco models --refresh
```
To see models for a specific provider:
```sh
oco models --provider anthropic
```
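Under the hood, both `oco setup` and `oco models` read the same on-disk JSON cache. A rough sketch of its shape, inferred from `getCacheInfo()` and `getCachedModels()` in `src/utils/modelCache.ts` later in this commit (the exact `MODEL_CACHE_PATH` and the 7-day `isCacheValid()` check live in that module and are not shown here):
```ts
// Sketch only: cache shape implied by getCacheInfo()/getCachedModels().
interface ModelCache {
  timestamp: number;                 // epoch ms of the last successful fetch
  models: Record<string, string[]>;  // provider name -> sorted model ids
}
```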
### Switch to other LLM providers with a custom URL
By default OpenCommit uses [OpenAI](https://openai.com).

View File

@@ -57222,7 +57222,7 @@ var {
// src/utils/errors.ts
var PROVIDER_BILLING_URLS = {
["anthropic" /* ANTHROPIC */]: "https://console.anthropic.com/settings/plans",
["anthropic" /* ANTHROPIC */]: "https://console.anthropic.com/settings/billing",
["openai" /* OPENAI */]: "https://platform.openai.com/settings/organization/billing",
["gemini" /* GEMINI */]: "https://aistudio.google.com/app/plan",
["groq" /* GROQ */]: "https://console.groq.com/settings/billing",
@@ -68713,9 +68713,97 @@ async function fetchOllamaModels(baseUrl = "http://localhost:11434") {
return [];
}
}
-async function fetchModelsForProvider(provider, apiKey, baseUrl) {
async function fetchAnthropicModels(apiKey) {
try {
const response = await fetch("https://api.anthropic.com/v1/models", {
headers: {
"x-api-key": apiKey,
"anthropic-version": "2023-06-01"
}
});
if (!response.ok) {
return MODEL_LIST.anthropic;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).filter((id) => id.startsWith("claude-")).sort();
return models && models.length > 0 ? models : MODEL_LIST.anthropic;
} catch {
return MODEL_LIST.anthropic;
}
}
async function fetchMistralModels(apiKey) {
try {
const response = await fetch("https://api.mistral.ai/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.mistral;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.mistral;
} catch {
return MODEL_LIST.mistral;
}
}
async function fetchGroqModels(apiKey) {
try {
const response = await fetch("https://api.groq.com/openai/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.groq;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.groq;
} catch {
return MODEL_LIST.groq;
}
}
async function fetchOpenRouterModels(apiKey) {
try {
const response = await fetch("https://openrouter.ai/api/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openrouter;
}
const data = await response.json();
const models = data.data?.filter(
(m5) => m5.context_length && m5.context_length > 0
).map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.openrouter;
} catch {
return MODEL_LIST.openrouter;
}
}
async function fetchDeepSeekModels(apiKey) {
try {
const response = await fetch("https://api.deepseek.com/v1/models", {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.deepseek;
}
const data = await response.json();
const models = data.data?.map((m5) => m5.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
} catch {
return MODEL_LIST.deepseek;
}
}
+async function fetchModelsForProvider(provider, apiKey, baseUrl, forceRefresh = false) {
const cache = readCache();
if (isCacheValid(cache) && cache.models[provider]) {
if (!forceRefresh && isCacheValid(cache) && cache.models[provider]) {
return cache.models[provider];
}
let models = [];
@@ -68731,25 +68819,45 @@ async function fetchModelsForProvider(provider, apiKey, baseUrl) {
models = await fetchOllamaModels(baseUrl);
break;
case "anthropic" /* ANTHROPIC */:
-models = MODEL_LIST.anthropic;
+if (apiKey) {
+models = await fetchAnthropicModels(apiKey);
+} else {
+models = MODEL_LIST.anthropic;
+}
break;
case "gemini" /* GEMINI */:
models = MODEL_LIST.gemini;
break;
case "groq" /* GROQ */:
-models = MODEL_LIST.groq;
+if (apiKey) {
+models = await fetchGroqModels(apiKey);
+} else {
+models = MODEL_LIST.groq;
+}
break;
case "mistral" /* MISTRAL */:
-models = MODEL_LIST.mistral;
+if (apiKey) {
+models = await fetchMistralModels(apiKey);
+} else {
+models = MODEL_LIST.mistral;
+}
break;
case "deepseek" /* DEEPSEEK */:
-models = MODEL_LIST.deepseek;
+if (apiKey) {
+models = await fetchDeepSeekModels(apiKey);
+} else {
+models = MODEL_LIST.deepseek;
+}
break;
case "aimlapi" /* AIMLAPI */:
models = MODEL_LIST.aimlapi;
break;
case "openrouter" /* OPENROUTER */:
-models = MODEL_LIST.openrouter;
+if (apiKey) {
+models = await fetchOpenRouterModels(apiKey);
+} else {
+models = MODEL_LIST.openrouter;
+}
break;
default:
models = MODEL_LIST.openai;
@@ -68759,6 +68867,31 @@ async function fetchModelsForProvider(provider, apiKey, baseUrl) {
writeCache(existingCache);
return models;
}
function clearModelCache() {
try {
if ((0, import_fs5.existsSync)(MODEL_CACHE_PATH)) {
(0, import_fs5.writeFileSync)(MODEL_CACHE_PATH, "{}", "utf8");
}
} catch {
}
}
function getCacheInfo() {
const cache = readCache();
if (!cache) {
return { timestamp: null, providers: [] };
}
return {
timestamp: cache.timestamp,
providers: Object.keys(cache.models || {})
};
}
function getCachedModels(provider) {
const cache = readCache();
if (!cache || !cache.models[provider]) {
return null;
}
return cache.models[provider];
}
// src/commands/setup.ts
var PROVIDER_DISPLAY_NAMES = {
@@ -68837,17 +68970,42 @@ ${source_default.dim(` Get your key at: ${url2}`)}`;
}
});
}
function formatCacheAge(timestamp) {
if (!timestamp) return "";
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1e3 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1e3 * 60 * 60));
if (days > 0) {
return `${days} day${days === 1 ? "" : "s"} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? "" : "s"} ago`;
}
return "just now";
}
async function selectModel(provider, apiKey) {
const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(" (")[0] || provider;
const loadingSpinner = le();
loadingSpinner.start("Fetching available models...");
loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
let models = [];
let usedFallback = false;
try {
models = await fetchModelsForProvider(provider, apiKey);
} catch {
usedFallback = true;
const providerKey = provider.toLowerCase();
models = MODEL_LIST[providerKey] || [];
}
loadingSpinner.stop("Models loaded");
const cacheInfo = getCacheInfo();
const cacheAge = formatCacheAge(cacheInfo.timestamp);
if (usedFallback) {
loadingSpinner.stop(
source_default.yellow("Could not fetch models from API. Using default list.")
);
} else if (cacheAge) {
loadingSpinner.stop(`Models loaded ${source_default.dim(`(cached ${cacheAge})`)}`);
} else {
loadingSpinner.stop("Models loaded");
}
if (models.length === 0) {
if (NO_API_KEY_PROVIDERS.includes(provider)) {
return await J4({
@@ -69099,6 +69257,116 @@ var setupCommand = G3(
}
);
// src/commands/models.ts
init_dist2();
function formatCacheAge2(timestamp) {
if (!timestamp) return "never";
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1e3 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1e3 * 60 * 60));
const minutes = Math.floor(ageMs / (1e3 * 60));
if (days > 0) {
return `${days} day${days === 1 ? "" : "s"} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? "" : "s"} ago`;
} else if (minutes > 0) {
return `${minutes} minute${minutes === 1 ? "" : "s"} ago`;
}
return "just now";
}
async function listModels(provider, useCache = true) {
const config7 = getConfig();
const apiKey = config7.OCO_API_KEY;
const currentModel = config7.OCO_MODEL;
let models = [];
if (useCache) {
const cached = getCachedModels(provider);
if (cached) {
models = cached;
}
}
if (models.length === 0) {
const providerKey = provider.toLowerCase();
models = MODEL_LIST[providerKey] || [];
}
console.log(`
${source_default.bold("Available models for")} ${source_default.cyan(provider)}:
`);
if (models.length === 0) {
console.log(source_default.dim(" No models found"));
} else {
models.forEach((model) => {
const isCurrent = model === currentModel;
const prefix = isCurrent ? source_default.green("* ") : " ";
const label = isCurrent ? source_default.green(model) : model;
console.log(`${prefix}${label}`);
});
}
console.log("");
}
async function refreshModels(provider) {
const config7 = getConfig();
const apiKey = config7.OCO_API_KEY;
const loadingSpinner = le();
loadingSpinner.start(`Fetching models from ${provider}...`);
clearModelCache();
try {
const models = await fetchModelsForProvider(provider, apiKey, void 0, true);
loadingSpinner.stop(`${source_default.green("+")} Fetched ${models.length} models`);
await listModels(provider, true);
} catch (error) {
loadingSpinner.stop(source_default.red("Failed to fetch models"));
console.error(source_default.red(`Error: ${error instanceof Error ? error.message : "Unknown error"}`));
}
}
var modelsCommand = G3(
{
name: "models" /* models */,
help: {
description: "List and manage cached models for your AI provider"
},
flags: {
refresh: {
type: Boolean,
alias: "r",
description: "Clear cache and re-fetch models from the provider",
default: false
},
provider: {
type: String,
alias: "p",
description: "Specify provider (defaults to current OCO_AI_PROVIDER)"
}
}
},
async ({ flags }) => {
const config7 = getConfig();
const provider = flags.provider || config7.OCO_AI_PROVIDER || "openai" /* OPENAI */;
ae(source_default.bgCyan(" OpenCommit Models "));
const cacheInfo = getCacheInfo();
if (cacheInfo.timestamp) {
console.log(
source_default.dim(` Cache last updated: ${formatCacheAge2(cacheInfo.timestamp)}`)
);
if (cacheInfo.providers.length > 0) {
console.log(
source_default.dim(` Cached providers: ${cacheInfo.providers.join(", ")}`)
);
}
} else {
console.log(source_default.dim(" No cached models"));
}
if (flags.refresh) {
await refreshModels(provider);
} else {
await listModels(provider);
}
ce(
`Run ${source_default.cyan("oco models --refresh")} to update the model list`
);
}
);
// src/utils/checkIsLatestVersion.ts
init_dist2();
@@ -69291,7 +69559,7 @@ Z2(
{
version: package_default.version,
name: "opencommit",
-commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand],
+commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
flags: {
fgm: {
type: Boolean,

View File

@@ -78200,7 +78200,7 @@ var {
// src/utils/errors.ts
var PROVIDER_BILLING_URLS = {
["anthropic" /* ANTHROPIC */]: "https://console.anthropic.com/settings/plans",
["anthropic" /* ANTHROPIC */]: "https://console.anthropic.com/settings/billing",
["openai" /* OPENAI */]: "https://platform.openai.com/settings/organization/billing",
["gemini" /* GEMINI */]: "https://aistudio.google.com/app/plan",
["groq" /* GROQ */]: "https://console.groq.com/settings/billing",

View File

@@ -14,6 +14,7 @@ import {
runSetup,
promptForMissingApiKey
} from './commands/setup';
import { modelsCommand } from './commands/models';
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
import { runMigrations } from './migrations/_run.js';
@@ -23,7 +24,7 @@ cli(
{
version: packageJSON.version,
name: 'opencommit',
-commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand],
+commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
flags: {
fgm: {
type: Boolean,

View File

@@ -2,5 +2,6 @@ export enum COMMANDS {
config = 'config',
hook = 'hook',
commitlint = 'commitlint',
-setup = 'setup'
+setup = 'setup',
+models = 'models'
}

src/commands/models.ts (new file, 144 lines)
View File

@@ -0,0 +1,144 @@
import { intro, outro, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
MODEL_LIST,
OCO_AI_PROVIDER_ENUM,
getConfig
} from './config';
import {
fetchModelsForProvider,
clearModelCache,
getCacheInfo,
getCachedModels
} from '../utils/modelCache';
function formatCacheAge(timestamp: number | null): string {
if (!timestamp) return 'never';
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1000 * 60 * 60));
const minutes = Math.floor(ageMs / (1000 * 60));
if (days > 0) {
return `${days} day${days === 1 ? '' : 's'} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
} else if (minutes > 0) {
return `${minutes} minute${minutes === 1 ? '' : 's'} ago`;
}
return 'just now';
}
async function listModels(provider: string, useCache: boolean = true): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const currentModel = config.OCO_MODEL;
// Get cached models or fetch new ones
let models: string[] = [];
if (useCache) {
const cached = getCachedModels(provider);
if (cached) {
models = cached;
}
}
if (models.length === 0) {
// Fallback to hardcoded list
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
models = MODEL_LIST[providerKey] || [];
}
console.log(`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`);
if (models.length === 0) {
console.log(chalk.dim(' No models found'));
} else {
models.forEach((model) => {
const isCurrent = model === currentModel;
const prefix = isCurrent ? chalk.green('* ') : ' ';
const label = isCurrent ? chalk.green(model) : model;
console.log(`${prefix}${label}`);
});
}
console.log('');
}
async function refreshModels(provider: string): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const loadingSpinner = spinner();
loadingSpinner.start(`Fetching models from ${provider}...`);
// Clear cache first
clearModelCache();
try {
const models = await fetchModelsForProvider(provider, apiKey, undefined, true);
loadingSpinner.stop(`${chalk.green('+')} Fetched ${models.length} models`);
// List the models
await listModels(provider, true);
} catch (error) {
loadingSpinner.stop(chalk.red('Failed to fetch models'));
console.error(chalk.red(`Error: ${error instanceof Error ? error.message : 'Unknown error'}`));
}
}
export const modelsCommand = command(
{
name: COMMANDS.models,
help: {
description: 'List and manage cached models for your AI provider'
},
flags: {
refresh: {
type: Boolean,
alias: 'r',
description: 'Clear cache and re-fetch models from the provider',
default: false
},
provider: {
type: String,
alias: 'p',
description: 'Specify provider (defaults to current OCO_AI_PROVIDER)'
}
}
},
async ({ flags }) => {
const config = getConfig();
const provider = flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
intro(chalk.bgCyan(' OpenCommit Models '));
// Show cache info
const cacheInfo = getCacheInfo();
if (cacheInfo.timestamp) {
console.log(
chalk.dim(` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`)
);
if (cacheInfo.providers.length > 0) {
console.log(
chalk.dim(` Cached providers: ${cacheInfo.providers.join(', ')}`)
);
}
} else {
console.log(chalk.dim(' No cached models'));
}
if (flags.refresh) {
await refreshModels(provider);
} else {
await listModels(provider);
}
outro(
`Run ${chalk.cyan('oco models --refresh')} to update the model list`
);
}
);
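The `formatCacheAge` helper above reports the largest non-zero unit. A few illustrative calls (a sketch, not part of the commit):
```ts
// floor(26h / 24h) = 1, so the day branch wins over the hour branch:
formatCacheAge(Date.now() - 26 * 60 * 60 * 1000); // "1 day ago"
formatCacheAge(Date.now() - 3 * 60 * 60 * 1000);  // "3 hours ago"
formatCacheAge(Date.now() - 30 * 60 * 1000);      // "30 minutes ago"
formatCacheAge(null);                             // "never"
```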

View File

@@ -16,7 +16,8 @@ import {
} from './config';
import {
fetchModelsForProvider,
-fetchOllamaModels
+fetchOllamaModels,
+getCacheInfo
} from '../utils/modelCache';
const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
@@ -108,24 +109,53 @@ async function getApiKey(provider: string): Promise<string | symbol> {
});
}
function formatCacheAge(timestamp: number | null): string {
if (!timestamp) return '';
const ageMs = Date.now() - timestamp;
const days = Math.floor(ageMs / (1000 * 60 * 60 * 24));
const hours = Math.floor(ageMs / (1000 * 60 * 60));
if (days > 0) {
return `${days} day${days === 1 ? '' : 's'} ago`;
} else if (hours > 0) {
return `${hours} hour${hours === 1 ? '' : 's'} ago`;
}
return 'just now';
}
async function selectModel(
provider: string,
apiKey?: string
): Promise<string | symbol> {
const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
const loadingSpinner = spinner();
-loadingSpinner.start('Fetching available models...');
+loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
let models: string[] = [];
let usedFallback = false;
try {
models = await fetchModelsForProvider(provider, apiKey);
} catch {
// Fall back to hardcoded list
usedFallback = true;
const providerKey = provider.toLowerCase() as keyof typeof MODEL_LIST;
models = MODEL_LIST[providerKey] || [];
}
-loadingSpinner.stop('Models loaded');
+// Check cache info for display
+const cacheInfo = getCacheInfo();
+const cacheAge = formatCacheAge(cacheInfo.timestamp);
+if (usedFallback) {
+loadingSpinner.stop(
+chalk.yellow('Could not fetch models from API. Using default list.')
+);
+} else if (cacheAge) {
+loadingSpinner.stop(`Models loaded ${chalk.dim(`(cached ${cacheAge})`)}`);
+} else {
+loadingSpinner.stop('Models loaded');
+}
if (models.length === 0) {
// For Ollama/MLX, prompt for manual entry

View File

@@ -3,7 +3,7 @@ import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
// Provider billing/help URLs for common errors
export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
-[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/plans',
+[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/settings/organization/billing',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/plan',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/settings/billing',

View File

@@ -87,15 +87,137 @@ export async function fetchOllamaModels(
}
}
export async function fetchAnthropicModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.anthropic.com/v1/models', {
headers: {
'x-api-key': apiKey,
'anthropic-version': '2023-06-01'
}
});
if (!response.ok) {
return MODEL_LIST.anthropic;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.filter((id: string) => id.startsWith('claude-'))
.sort();
return models && models.length > 0 ? models : MODEL_LIST.anthropic;
} catch {
return MODEL_LIST.anthropic;
}
}
export async function fetchMistralModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.mistral.ai/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.mistral;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.mistral;
} catch {
return MODEL_LIST.mistral;
}
}
export async function fetchGroqModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.groq.com/openai/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.groq;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.groq;
} catch {
return MODEL_LIST.groq;
}
}
export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://openrouter.ai/api/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.openrouter;
}
const data = await response.json();
// Filter to text-capable models only (exclude image/audio models)
const models = data.data
?.filter((m: { id: string; context_length?: number }) =>
m.context_length && m.context_length > 0
)
.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.openrouter;
} catch {
return MODEL_LIST.openrouter;
}
}
export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
try {
const response = await fetch('https://api.deepseek.com/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`
}
});
if (!response.ok) {
return MODEL_LIST.deepseek;
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
} catch {
return MODEL_LIST.deepseek;
}
}
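All five fetchers above share the same fetch-then-fallback shape; a generic helper could capture it as sketched below. This is a hypothetical refactor for illustration, not part of the commit (and note the OpenRouter variant filters on `context_length` rather than the model id):
```ts
// Hypothetical helper condensing the repeated pattern of the fetchers above.
async function fetchProviderModels(
  url: string,
  headers: Record<string, string>,
  fallback: string[],
  keep: (id: string) => boolean = () => true
): Promise<string[]> {
  try {
    const response = await fetch(url, { headers });
    if (!response.ok) return fallback;
    const data = await response.json();
    const models = data.data
      ?.map((m: { id: string }) => m.id)
      .filter(keep)
      .sort();
    return models && models.length > 0 ? models : fallback;
  } catch {
    return fallback; // network failures degrade to the hardcoded list
  }
}
```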
export async function fetchModelsForProvider(
provider: string,
apiKey?: string,
-baseUrl?: string
+baseUrl?: string,
+forceRefresh: boolean = false
): Promise<string[]> {
const cache = readCache();
-// Return cached models if valid
-if (isCacheValid(cache) && cache!.models[provider]) {
+// Return cached models if valid (unless force refresh)
+if (!forceRefresh && isCacheValid(cache) && cache!.models[provider]) {
return cache!.models[provider];
}
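`isCacheValid` is referenced here but defined outside this diff; given the 7-day cache documented in the README hunk above, it plausibly reduces to a timestamp comparison like this (an assumption, not code from the commit):
```ts
// Assumed implementation of isCacheValid(); the 7-day TTL comes from the
// README ("Models are cached for 7 days"), not from this diff.
const CACHE_TTL_MS = 7 * 24 * 60 * 60 * 1000;
function isCacheValid(cache: { timestamp: number } | null): boolean {
  return cache !== null && Date.now() - cache.timestamp < CACHE_TTL_MS;
}
```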
@@ -115,23 +237,40 @@ export async function fetchModelsForProvider(
break;
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
-models = MODEL_LIST.anthropic;
+if (apiKey) {
+models = await fetchAnthropicModels(apiKey);
+} else {
+models = MODEL_LIST.anthropic;
+}
break;
case OCO_AI_PROVIDER_ENUM.GEMINI:
// Google's API doesn't easily list generative models, use hardcoded list
models = MODEL_LIST.gemini;
break;
case OCO_AI_PROVIDER_ENUM.GROQ:
-models = MODEL_LIST.groq;
+if (apiKey) {
+models = await fetchGroqModels(apiKey);
+} else {
+models = MODEL_LIST.groq;
+}
break;
case OCO_AI_PROVIDER_ENUM.MISTRAL:
-models = MODEL_LIST.mistral;
+if (apiKey) {
+models = await fetchMistralModels(apiKey);
+} else {
+models = MODEL_LIST.mistral;
+}
break;
case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
-models = MODEL_LIST.deepseek;
+if (apiKey) {
+models = await fetchDeepSeekModels(apiKey);
+} else {
+models = MODEL_LIST.deepseek;
+}
break;
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
@@ -139,7 +278,11 @@ export async function fetchModelsForProvider(
break;
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
-models = MODEL_LIST.openrouter;
+if (apiKey) {
+models = await fetchOpenRouterModels(apiKey);
+} else {
+models = MODEL_LIST.openrouter;
+}
break;
default:
@@ -168,3 +311,22 @@ export function clearModelCache(): void {
// Silently fail
}
}
export function getCacheInfo(): { timestamp: number | null; providers: string[] } {
const cache = readCache();
if (!cache) {
return { timestamp: null, providers: [] };
}
return {
timestamp: cache.timestamp,
providers: Object.keys(cache.models || {})
};
}
export function getCachedModels(provider: string): string[] | null {
const cache = readCache();
if (!cache || !cache.models[provider]) {
return null;
}
return cache.models[provider];
}
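Taken together, the two call sites differ only in cache handling: `oco setup` accepts any cache younger than 7 days, while `oco models --refresh` clears it and forces a network fetch. Condensed from the code in this commit:
```ts
// During `oco setup`: serve from cache when it is valid.
const setupModels = await fetchModelsForProvider(provider, apiKey);

// During `oco models --refresh`: drop the cache, then force a re-fetch.
clearModelCache();
const freshModels = await fetchModelsForProvider(provider, apiKey, undefined, true);
```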