diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py
index 5808c4eb11..5e97aa7c06 100644
--- a/autogpt_platform/backend/backend/blocks/llm.py
+++ b/autogpt_platform/backend/backend/blocks/llm.py
@@ -79,6 +79,10 @@ class ModelMetadata(NamedTuple):
provider: str
context_window: int
max_output_tokens: int | None
+ display_name: str
+ provider_name: str
+ creator_name: str
+ price_tier: Literal[1, 2, 3]
class LlmModelMeta(EnumMeta):
@@ -174,21 +178,18 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
@classmethod
def __get_pydantic_json_schema__(cls, schema, handler):
json_schema = handler(schema)
- #TODO this may need to be updated once we have model registry
llm_model_metadata = {}
for model in cls:
model_name = model.value
- provider = model.metadata.provider
- if "/" in model_name:
- creator, title = model_name.split("/", 1)
- else:
- creator = provider
- title = model_name
+ metadata = model.metadata
llm_model_metadata[model_name] = {
- "creator": creator,
- "title": title,
- "provider": provider,
+ "creator": metadata.creator_name,
+ "creator_name": metadata.creator_name,
+ "title": metadata.display_name,
+ "provider": metadata.provider,
+ "provider_name": metadata.provider_name,
"name": model_name,
+ "price_tier": metadata.price_tier,
}
json_schema["llm_model"] = True
json_schema["llm_model_metadata"] = llm_model_metadata
@@ -213,119 +214,89 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
MODEL_METADATA = {
# https://platform.openai.com/docs/models
- LlmModel.O3: ModelMetadata("openai", 200000, 100000),
- LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000), # o3-mini-2025-01-31
- LlmModel.O1: ModelMetadata("openai", 200000, 100000), # o1-2024-12-17
- LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12
+ LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2),
+ LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1), # o3-mini-2025-01-31
+ LlmModel.O1: ModelMetadata("openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3), # o1-2024-12-17
+ LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2), # o1-mini-2024-09-12
# GPT-5 models
- LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
- LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
- LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
- LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
- LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
- LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
- LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768),
- LlmModel.GPT41_MINI: ModelMetadata("openai", 1047576, 32768),
- LlmModel.GPT4O_MINI: ModelMetadata(
- "openai", 128000, 16384
- ), # gpt-4o-mini-2024-07-18
- LlmModel.GPT4O: ModelMetadata("openai", 128000, 16384), # gpt-4o-2024-08-06
- LlmModel.GPT4_TURBO: ModelMetadata(
- "openai", 128000, 4096
- ), # gpt-4-turbo-2024-04-09
- LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096), # gpt-3.5-turbo-0125
+ LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3),
+ LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2),
+ LlmModel.GPT5: ModelMetadata("openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1),
+ LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1),
+ LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1),
+ LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2),
+ LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1),
+ LlmModel.GPT41_MINI: ModelMetadata("openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1),
+ LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1), # gpt-4o-mini-2024-07-18
+ LlmModel.GPT4O: ModelMetadata("openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2), # gpt-4o-2024-08-06
+ LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3), # gpt-4-turbo-2024-04-09
+ LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1), # gpt-3.5-turbo-0125
# https://docs.anthropic.com/en/docs/about-claude/models
- LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
- "anthropic", 200000, 32000
- ), # claude-opus-4-1-20250805
- LlmModel.CLAUDE_4_OPUS: ModelMetadata(
- "anthropic", 200000, 32000
- ), # claude-4-opus-20250514
- LlmModel.CLAUDE_4_SONNET: ModelMetadata(
- "anthropic", 200000, 64000
- ), # claude-4-sonnet-20250514
- LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
- "anthropic", 200000, 64000
- ), # claude-opus-4-5-20251101
- LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
- "anthropic", 200000, 64000
- ), # claude-sonnet-4-5-20250929
- LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
- "anthropic", 200000, 64000
- ), # claude-haiku-4-5-20251001
- LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
- "anthropic", 200000, 64000
- ), # claude-3-7-sonnet-20250219
- LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
- "anthropic", 200000, 4096
- ), # claude-3-haiku-20240307
+ LlmModel.CLAUDE_4_1_OPUS: ModelMetadata("anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3), # claude-opus-4-1-20250805
+ LlmModel.CLAUDE_4_OPUS: ModelMetadata("anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3), # claude-4-opus-20250514
+ LlmModel.CLAUDE_4_SONNET: ModelMetadata("anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2), # claude-4-sonnet-20250514
+ LlmModel.CLAUDE_4_5_OPUS: ModelMetadata("anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3), # claude-opus-4-5-20251101
+ LlmModel.CLAUDE_4_5_SONNET: ModelMetadata("anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3), # claude-sonnet-4-5-20250929
+ LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata("anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2), # claude-haiku-4-5-20251001
+ LlmModel.CLAUDE_3_7_SONNET: ModelMetadata("anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2), # claude-3-7-sonnet-20250219
+ LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1), # claude-3-haiku-20240307
# https://docs.aimlapi.com/api-overview/model-database/text-models
- LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000),
- LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000),
- LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None),
- LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000),
- LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None),
+ LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1),
+ LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000, "Llama 3.1 Nemotron 70B Instruct", "AI/ML", "NVIDIA", 1),
+ LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1),
+ LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1),
+ LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1),
# https://console.groq.com/docs/models
- LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768),
- LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 128000, 8192),
+ LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Groq", 1),
+ LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Groq", 1),
# https://ollama.com/library
- LlmModel.OLLAMA_LLAMA3_3: ModelMetadata("ollama", 8192, None),
- LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192, None),
- LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, None),
- LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, None),
- LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None),
+ LlmModel.OLLAMA_LLAMA3_3: ModelMetadata("ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1),
+ LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1),
+ LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1),
+ LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1),
+ LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral", 1),
# https://openrouter.ai/models
- LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192),
- LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535),
- LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535),
- LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192),
- LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
- "open_router", 1048576, 65535
- ),
- LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata("open_router", 1048576, 8192),
- LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096),
- LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096),
- LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096),
- LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048),
- LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840),
- LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000),
- LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
- LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
- "open_router",
- 128000,
- 16000,
- ),
- LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
- "open_router", 131000, 4096
- ),
- LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
- "open_router", 12288, 12288
- ),
- LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata("open_router", 131072, 131072),
- LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata("open_router", 131072, 32768),
- LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 300000, 5120),
- LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 128000, 5120),
- LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 300000, 5120),
- LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 65536, 4096),
- LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096),
- LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
- LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
- LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000),
- LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000),
- LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000),
- LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000),
- LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000),
- LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144),
- LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144),
+ LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192, "Gemini 2.5 Pro Preview 03.25", "OpenRouter", "Google", 2),
+ LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2),
+ LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1),
+ LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1),
+ LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata("open_router", 1048576, 65535, "Gemini 2.5 Flash Lite Preview 06.17", "OpenRouter", "Google", 1),
+ LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata("open_router", 1048576, 8192, "Gemini 2.0 Flash Lite 001", "OpenRouter", "Google", 1),
+ LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1),
+ LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1),
+ LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2),
+ LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1),
+ LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1),
+ LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1),
+ LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2),
+ LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata("open_router", 128000, 16000, "Sonar Deep Research", "OpenRouter", "Perplexity", 3),
+ LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata("open_router", 131000, 4096, "Hermes 3 Llama 3.1 405B", "OpenRouter", "Nous Research", 1),
+ LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata("open_router", 12288, 12288, "Hermes 3 Llama 3.1 70B", "OpenRouter", "Nous Research", 1),
+ LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata("open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1),
+ LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata("open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1),
+ LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1),
+ LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1),
+ LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1),
+ LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1),
+ LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1),
+ LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1),
+ LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1),
+ LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3),
+ LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1),
+ LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1),
+ LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1),
+ LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1),
+ LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144, "Qwen 3 235B A22B Thinking 2507", "OpenRouter", "Qwen", 1),
+ LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3),
# Llama API models
- LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028),
- LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
- LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
- LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
+ LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028, "Llama 4 Scout 17B 16E Instruct FP8", "Llama API", "Meta", 1),
+ LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028, "Llama 4 Maverick 17B 128E Instruct FP8", "Llama API", "Meta", 1),
+ LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1),
+ LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1),
# v0 by Vercel models
- LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
- LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
- LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
+ LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1),
+ LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1),
+ LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1),
}
DEFAULT_LLM_MODEL = LlmModel.GPT5_2
diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py
index 7f8ee97d52..1b54ae0942 100644
--- a/autogpt_platform/backend/backend/data/block_cost_config.py
+++ b/autogpt_platform/backend/backend/data/block_cost_config.py
@@ -99,10 +99,15 @@ MODEL_COST: dict[LlmModel, int] = {
LlmModel.OPENAI_GPT_OSS_20B: 1,
LlmModel.GEMINI_2_5_PRO: 4,
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
+ LlmModel.GEMINI_2_5_FLASH: 1,
+ LlmModel.GEMINI_2_0_FLASH: 1,
+ LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
+ LlmModel.GEMINI_2_0_FLASH_LITE: 1,
LlmModel.MISTRAL_NEMO: 1,
LlmModel.COHERE_COMMAND_R_08_2024: 1,
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
LlmModel.DEEPSEEK_CHAT: 2,
+ LlmModel.DEEPSEEK_R1_0528: 1,
LlmModel.PERPLEXITY_SONAR: 1,
LlmModel.PERPLEXITY_SONAR_PRO: 5,
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
@@ -126,11 +131,6 @@ MODEL_COST: dict[LlmModel, int] = {
LlmModel.KIMI_K2: 1,
LlmModel.QWEN3_235B_A22B_THINKING: 1,
LlmModel.QWEN3_CODER: 9,
- LlmModel.GEMINI_2_5_FLASH: 1,
- LlmModel.GEMINI_2_0_FLASH: 1,
- LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
- LlmModel.GEMINI_2_0_FLASH_LITE: 1,
- LlmModel.DEEPSEEK_R1_0528: 1,
# v0 by Vercel models
LlmModel.V0_1_5_MD: 1,
LlmModel.V0_1_5_LG: 2,
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/page.tsx b/autogpt_platform/frontend/src/app/(platform)/build/page.tsx
index f1d62ee5fb..e3cf5eac36 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/page.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/page.tsx
@@ -54,7 +54,7 @@ export default function BuilderPage() {
);
}
- return isNewFlowEditorEnabled ? (
+ return true ? (
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmIcon.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmIcon.tsx
index 9f647cb1f2..1fb34679ee 100644
--- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmIcon.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmIcon.tsx
@@ -2,7 +2,6 @@
import Image from "next/image";
import { Text } from "@/components/atoms/Text/Text";
-import { toLlmDisplayName } from "../helpers";
import claudeImg from "@/components/atoms/LLMItem/assets/claude.svg";
import gptImg from "@/components/atoms/LLMItem/assets/gpt.svg";
import perplexityImg from "@/components/atoms/LLMItem/assets/perplexity.svg";
@@ -31,7 +30,7 @@ export function LlmIcon({ value, size = 20 }: Props) {
return (
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmModelPicker.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmModelPicker.tsx
index aa3e24f2c2..67c0594586 100644
--- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmModelPicker.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmModelPicker.tsx
@@ -9,7 +9,13 @@ import {
} from "@/components/__legacy__/ui/popover";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
-import { groupByCreator, groupByTitle, toLlmDisplayName } from "../helpers";
+import {
+ getCreatorDisplayName,
+ getModelDisplayName,
+ getProviderDisplayName,
+ groupByCreator,
+ groupByTitle,
+} from "../helpers";
import { LlmModelMetadata } from "../types";
import { LlmIcon } from "./LlmIcon";
import { LlmMenuHeader } from "./LlmMenuHeader";
@@ -39,26 +45,36 @@ export function LlmModelPicker({
const [activeTitle, setActiveTitle] = useState(null);
const creators = useMemo(() => {
- return Array.from(
- new Set(models.map((model) => model.creator)),
- ).sort((a, b) => toLlmDisplayName(a).localeCompare(toLlmDisplayName(b)));
+ const grouped = groupByCreator(models);
+ return Array.from(grouped.keys()).sort((a, b) => a.localeCompare(b));
}, [models]);
const modelsByCreator = useMemo(() => groupByCreator(models), [models]);
+ const creatorIconValues = useMemo(() => {
+ const map = new Map();
+ for (const [creator, entries] of modelsByCreator.entries()) {
+ map.set(creator, entries[0]?.creator ?? creator);
+ }
+ return map;
+ }, [modelsByCreator]);
+
useEffect(() => {
if (!open) {
return;
}
setView("creator");
- setActiveCreator(selectedModel?.creator ?? creators[0] ?? null);
- setActiveTitle(selectedModel?.title ?? null);
- }, [open, selectedModel?.creator, selectedModel?.title, creators]);
+ setActiveCreator(
+ selectedModel ? getCreatorDisplayName(selectedModel) : creators[0] ?? null,
+ );
+ setActiveTitle(selectedModel ? getModelDisplayName(selectedModel) : null);
+ }, [open, selectedModel, creators]);
const currentCreator = activeCreator ?? creators[0] ?? null;
const currentModels = currentCreator
? modelsByCreator.get(currentCreator) ?? []
: [];
+ const currentCreatorIcon = currentModels[0]?.creator ?? currentCreator;
const modelsByTitle = useMemo(() => groupByTitle(currentModels), [currentModels]);
@@ -88,7 +104,9 @@ export function LlmModelPicker({
};
const triggerModel = selectedModel ?? recommendedModel ?? models[0];
- const triggerTitle = triggerModel ? triggerModel.title : "Select model";
+ const triggerTitle = triggerModel
+ ? getModelDisplayName(triggerModel)
+ : "Select model";
const triggerCreator = triggerModel?.creator ?? "";
return (
@@ -120,7 +138,7 @@ export function LlmModelPicker({
{recommendedModel && (
<>
}
onClick={() => handleSelectModel(recommendedModel.name)}
@@ -131,10 +149,14 @@ export function LlmModelPicker({
{creators.map((creator) => (
}
+ title={creator}
+ icon={}
showChevron={true}
- isActive={selectedModel?.creator === creator}
+ isActive={
+ selectedModel
+ ? getCreatorDisplayName(selectedModel) === creator
+ : false
+ }
onClick={() => {
setActiveCreator(creator);
setView("model");
@@ -146,7 +168,7 @@ export function LlmModelPicker({
{view === "model" && currentCreator && (
setView("creator")}
/>
@@ -154,10 +176,14 @@ export function LlmModelPicker({
}
+ icon={}
rightSlot={}
showChevron={entry.providerCount > 1}
- isActive={selectedModel?.title === entry.title}
+ isActive={
+ selectedModel
+ ? getModelDisplayName(selectedModel) === entry.title
+ : false
+ }
onClick={() => {
if (entry.providerCount > 1) {
setActiveTitle(entry.title);
@@ -180,7 +206,7 @@ export function LlmModelPicker({
{providerEntries.map((entry) => (
}
isActive={selectedModel?.provider === entry.provider}
onClick={() => handleSelectModel(entry.name)}
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/helpers.ts b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/helpers.ts
index 7e823bd525..bb8619c25f 100644
--- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/helpers.ts
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/helpers.ts
@@ -1,38 +1,12 @@
import { LlmModelMetadata } from "./types";
-const displayNameOverrides: Record = {
- aiml_api: "AI/ML",
- anthropic: "Anthropic",
- openai: "OpenAI",
- open_router: "Open Router",
- llama_api: "Llama API",
- groq: "Groq",
- ollama: "Ollama",
- v0: "V0",
-};
-
-export function toLlmDisplayName(value: string): string {
- if (!value) {
- return "";
- }
- const normalized = value.toLowerCase();
- if (displayNameOverrides[normalized]) {
- return displayNameOverrides[normalized];
- }
- return value
- .split(/[_-]/)
- .map((word) =>
- word.length ? word[0].toUpperCase() + word.slice(1).toLowerCase() : "",
- )
- .join(" ");
-}
-
export function groupByCreator(models: LlmModelMetadata[]) {
const map = new Map();
for (const model of models) {
- const existing = map.get(model.creator) ?? [];
+ const key = getCreatorDisplayName(model);
+ const existing = map.get(key) ?? [];
existing.push(model);
- map.set(model.creator, existing);
+ map.set(key, existing);
}
return map;
}
@@ -40,9 +14,22 @@ export function groupByCreator(models: LlmModelMetadata[]) {
export function groupByTitle(models: LlmModelMetadata[]) {
const map = new Map();
for (const model of models) {
- const existing = map.get(model.title) ?? [];
+ const displayName = getModelDisplayName(model);
+ const existing = map.get(displayName) ?? [];
existing.push(model);
- map.set(model.title, existing);
+ map.set(displayName, existing);
}
return map;
}
+
+export function getCreatorDisplayName(model: LlmModelMetadata): string {
+ return model.creator_name || model.creator || "";
+}
+
+export function getModelDisplayName(model: LlmModelMetadata): string {
+ return model.title || model.name || "";
+}
+
+export function getProviderDisplayName(model: LlmModelMetadata): string {
+ return model.provider_name || model.provider || "";
+}
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/types.ts b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/types.ts
index 4be61ed692..39389b2535 100644
--- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/types.ts
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/types.ts
@@ -1,7 +1,9 @@
export type LlmModelMetadata = {
creator: string;
+ creator_name: string;
title: string;
provider: string;
+ provider_name: string;
name: string;
price_tier?: number;
};