diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py
index 85e8fc36df..fdcd7f3568 100644
--- a/autogpt_platform/backend/backend/blocks/llm.py
+++ b/autogpt_platform/backend/backend/blocks/llm.py
@@ -79,6 +79,10 @@ class ModelMetadata(NamedTuple):
     provider: str
     context_window: int
     max_output_tokens: int | None
+    display_name: str
+    provider_name: str
+    creator_name: str
+    price_tier: Literal[1, 2, 3]
 
 
 class LlmModelMeta(EnumMeta):
@@ -171,6 +175,26 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     V0_1_5_LG = "v0-1.5-lg"
     V0_1_0_MD = "v0-1.0-md"
 
+    @classmethod
+    def __get_pydantic_json_schema__(cls, schema, handler):
+        json_schema = handler(schema)
+        llm_model_metadata = {}
+        for model in cls:
+            model_name = model.value
+            metadata = model.metadata
+            llm_model_metadata[model_name] = {
+                "creator": metadata.creator_name,
+                "creator_name": metadata.creator_name,
+                "title": metadata.display_name,
+                "provider": metadata.provider,
+                "provider_name": metadata.provider_name,
+                "name": model_name,
+                "price_tier": metadata.price_tier,
+            }
+        json_schema["llm_model"] = True
+        json_schema["llm_model_metadata"] = llm_model_metadata
+        return json_schema
+
     @property
     def metadata(self) -> ModelMetadata:
         return MODEL_METADATA[self]
@@ -190,119 +214,291 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
 
 MODEL_METADATA = {
     # https://platform.openai.com/docs/models
-    LlmModel.O3: ModelMetadata("openai", 200000, 100000),
-    LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000),  # o3-mini-2025-01-31
-    LlmModel.O1: ModelMetadata("openai", 200000, 100000),  # o1-2024-12-17
-    LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536),  # o1-mini-2024-09-12
+    LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2),
+    LlmModel.O3_MINI: ModelMetadata(
+        "openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1
+    ),  # o3-mini-2025-01-31
+    LlmModel.O1: ModelMetadata(
+        "openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3
+    ),  # o1-2024-12-17
+    LlmModel.O1_MINI: ModelMetadata(
+        "openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2
+    ),  # o1-mini-2024-09-12
     # GPT-5 models
-    LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
-    LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
-    LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
-    LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
-    LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
-    LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
-    LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768),
-    LlmModel.GPT41_MINI: ModelMetadata("openai", 1047576, 32768),
+    LlmModel.GPT5_2: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3
+    ),
+    LlmModel.GPT5_1: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2
+    ),
+    LlmModel.GPT5: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT5_MINI: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT5_NANO: ModelMetadata(
+        "openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT5_CHAT: ModelMetadata(
+        "openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2
+    ),
+    LlmModel.GPT41: ModelMetadata(
+        "openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1
+    ),
+    LlmModel.GPT41_MINI: ModelMetadata(
+        "openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1
+    ),
     LlmModel.GPT4O_MINI: ModelMetadata(
-        "openai", 128000, 16384
"openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1 ), # gpt-4o-mini-2024-07-18 - LlmModel.GPT4O: ModelMetadata("openai", 128000, 16384), # gpt-4o-2024-08-06 + LlmModel.GPT4O: ModelMetadata( + "openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2 + ), # gpt-4o-2024-08-06 LlmModel.GPT4_TURBO: ModelMetadata( - "openai", 128000, 4096 + "openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3 ), # gpt-4-turbo-2024-04-09 - LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096), # gpt-3.5-turbo-0125 + LlmModel.GPT3_5_TURBO: ModelMetadata( + "openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1 + ), # gpt-3.5-turbo-0125 # https://docs.anthropic.com/en/docs/about-claude/models LlmModel.CLAUDE_4_1_OPUS: ModelMetadata( - "anthropic", 200000, 32000 + "anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3 ), # claude-opus-4-1-20250805 LlmModel.CLAUDE_4_OPUS: ModelMetadata( - "anthropic", 200000, 32000 + "anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3 ), # claude-4-opus-20250514 LlmModel.CLAUDE_4_SONNET: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2 ), # claude-4-sonnet-20250514 LlmModel.CLAUDE_4_5_OPUS: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3 ), # claude-opus-4-5-20251101 LlmModel.CLAUDE_4_5_SONNET: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3 ), # claude-sonnet-4-5-20250929 LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2 ), # claude-haiku-4-5-20251001 LlmModel.CLAUDE_3_7_SONNET: ModelMetadata( - "anthropic", 200000, 64000 + "anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2 ), # claude-3-7-sonnet-20250219 LlmModel.CLAUDE_3_HAIKU: ModelMetadata( - "anthropic", 200000, 4096 + "anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1 ), # claude-3-haiku-20240307 # https://docs.aimlapi.com/api-overview/model-database/text-models - LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000), - LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000), - LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None), - LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000), - LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None), - # https://console.groq.com/docs/models - LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768), - LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 128000, 8192), - # https://ollama.com/library - LlmModel.OLLAMA_LLAMA3_3: ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, None), - LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None), - # https://openrouter.ai/models - LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192), - LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535), - LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535), - LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192), - LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata( - "open_router", 1048576, 65535 + LlmModel.AIML_API_QWEN2_5_72B: 
+        "aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1
+    ),
+    LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata(
+        "aiml_api",
+        128000,
+        40000,
+        "Llama 3.1 Nemotron 70B Instruct",
+        "AI/ML",
+        "Nvidia",
+        1,
+    ),
+    LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata(
+        "aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1
+    ),
+    LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata(
+        "aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1
+    ),
+    LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata(
+        "aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1
+    ),
+    # https://console.groq.com/docs/models
+    LlmModel.LLAMA3_3_70B: ModelMetadata(
+        "groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Meta", 1
+    ),
+    LlmModel.LLAMA3_1_8B: ModelMetadata(
+        "groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Meta", 1
+    ),
+    # https://ollama.com/library
+    LlmModel.OLLAMA_LLAMA3_3: ModelMetadata(
+        "ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_LLAMA3_2: ModelMetadata(
+        "ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata(
+        "ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata(
+        "ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1
+    ),
+    LlmModel.OLLAMA_DOLPHIN: ModelMetadata(
+        "ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral AI", 1
+    ),
+    # https://openrouter.ai/models
+    LlmModel.GEMINI_2_5_PRO: ModelMetadata(
+        "open_router",
+        1050000,
+        8192,
+        "Gemini 2.5 Pro Preview 03.25",
+        "OpenRouter",
+        "Google",
+        2,
+    ),
+    LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata(
+        "open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2
+    ),
+    LlmModel.GEMINI_2_5_FLASH: ModelMetadata(
+        "open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1
+    ),
+    LlmModel.GEMINI_2_0_FLASH: ModelMetadata(
+        "open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1
+    ),
+    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
+        "open_router",
+        1048576,
+        65535,
+        "Gemini 2.5 Flash Lite Preview 06.17",
+        "OpenRouter",
+        "Google",
+        1,
+    ),
+    LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata(
+        "open_router",
+        1048576,
+        8192,
+        "Gemini 2.0 Flash Lite 001",
+        "OpenRouter",
+        "Google",
+        1,
+    ),
+    LlmModel.MISTRAL_NEMO: ModelMetadata(
+        "open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1
+    ),
+    LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata(
+        "open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1
+    ),
+    LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata(
+        "open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2
+    ),
+    LlmModel.DEEPSEEK_CHAT: ModelMetadata(
+        "open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1
+    ),
+    LlmModel.DEEPSEEK_R1_0528: ModelMetadata(
+        "open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1
+    ),
+    LlmModel.PERPLEXITY_SONAR: ModelMetadata(
+        "open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1
+    ),
+    LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata(
+        "open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2
     ),
-    LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata("open_router", 1048576, 8192),
-    LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096),
-    LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096),
-    LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096),
-    LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048),
-    LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840),
-    LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000),
-    LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
     LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
         "open_router",
         128000,
         16000,
+        "Sonar Deep Research",
+        "OpenRouter",
+        "Perplexity",
+        3,
     ),
     LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
-        "open_router", 131000, 4096
+        "open_router",
+        131000,
+        4096,
+        "Hermes 3 Llama 3.1 405B",
+        "OpenRouter",
+        "Nous Research",
+        1,
     ),
     LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
-        "open_router", 12288, 12288
+        "open_router",
+        12288,
+        12288,
+        "Hermes 3 Llama 3.1 70B",
+        "OpenRouter",
+        "Nous Research",
+        1,
+    ),
+    LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata(
+        "open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1
+    ),
+    LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata(
+        "open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1
+    ),
+    LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata(
+        "open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1
+    ),
+    LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata(
+        "open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1
+    ),
+    LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata(
+        "open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1
+    ),
+    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata(
+        "open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1
+    ),
+    LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata(
+        "open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1
+    ),
+    LlmModel.META_LLAMA_4_SCOUT: ModelMetadata(
+        "open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1
+    ),
+    LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata(
+        "open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1
+    ),
+    LlmModel.GROK_4: ModelMetadata(
+        "open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3
+    ),
+    LlmModel.GROK_4_FAST: ModelMetadata(
+        "open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1
+    ),
+    LlmModel.GROK_4_1_FAST: ModelMetadata(
+        "open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1
+    ),
+    LlmModel.GROK_CODE_FAST_1: ModelMetadata(
+        "open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1
+    ),
+    LlmModel.KIMI_K2: ModelMetadata(
+        "open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1
+    ),
+    LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata(
+        "open_router",
+        262144,
+        262144,
+        "Qwen 3 235B A22B Thinking 2507",
+        "OpenRouter",
+        "Qwen",
+        1,
+    ),
+    LlmModel.QWEN3_CODER: ModelMetadata(
+        "open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3
     ),
-    LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata("open_router", 131072, 131072),
-    LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata("open_router", 131072, 32768),
-    LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 300000, 5120),
-    LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 128000, 5120),
-    LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 300000, 5120),
-    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 65536, 4096),
-    LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096),
-    LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
-    LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
-    LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000),
-    LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000),
-    LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000),
-    LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000),
-    LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000),
-    LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144),
-    LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144),
     # Llama API models
-    LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028),
-    LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
-    LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
-    LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
+    LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata(
+        "llama_api",
+        128000,
+        4028,
+        "Llama 4 Scout 17B 16E Instruct FP8",
+        "Llama API",
+        "Meta",
+        1,
+    ),
+    LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata(
+        "llama_api",
+        128000,
+        4028,
+        "Llama 4 Maverick 17B 128E Instruct FP8",
+        "Llama API",
+        "Meta",
+        1,
+    ),
+    LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata(
+        "llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1
+    ),
+    LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata(
+        "llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1
+    ),
     # v0 by Vercel models
-    LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
-    LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
-    LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
+    LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1),
+    LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1),
+    LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1),
 }
 
 DEFAULT_LLM_MODEL = LlmModel.GPT5_2
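Note on the schema hook above: every block input typed as `LlmModel` now serializes with an `llm_model` flag and a per-model metadata map, which is what the new frontend field consumes. A minimal sketch of that shape; the per-entry keys are exactly the ones built in `__get_pydantic_json_schema__`, and the sample entry uses a v0 model whose values appear verbatim in `MODEL_METADATA`:

```ts
// Shape of the extras injected into the JSON schema of an LlmModel input.
type LlmModelSchemaExtras = {
  llm_model: true;
  llm_model_metadata: Record<
    string,
    {
      creator: string;
      creator_name: string;
      title: string;
      provider: string;
      provider_name: string;
      name: string;
      price_tier: 1 | 2 | 3;
    }
  >;
};

// One concrete entry, taken from the table above (LlmModel.V0_1_5_LG).
const sampleEntry: LlmModelSchemaExtras["llm_model_metadata"][string] = {
  creator: "V0",
  creator_name: "V0",
  title: "v0 1.5 LG",
  provider: "v0",
  provider_name: "V0",
  name: "v0-1.5-lg",
  price_tier: 1,
};
```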
diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py
index 7f8ee97d52..1b54ae0942 100644
--- a/autogpt_platform/backend/backend/data/block_cost_config.py
+++ b/autogpt_platform/backend/backend/data/block_cost_config.py
@@ -99,10 +99,15 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.OPENAI_GPT_OSS_20B: 1,
     LlmModel.GEMINI_2_5_PRO: 4,
     LlmModel.GEMINI_3_PRO_PREVIEW: 5,
+    LlmModel.GEMINI_2_5_FLASH: 1,
+    LlmModel.GEMINI_2_0_FLASH: 1,
+    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
+    LlmModel.GEMINI_2_0_FLASH_LITE: 1,
     LlmModel.MISTRAL_NEMO: 1,
     LlmModel.COHERE_COMMAND_R_08_2024: 1,
     LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
     LlmModel.DEEPSEEK_CHAT: 2,
+    LlmModel.DEEPSEEK_R1_0528: 1,
     LlmModel.PERPLEXITY_SONAR: 1,
     LlmModel.PERPLEXITY_SONAR_PRO: 5,
     LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
@@ -126,11 +131,6 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.KIMI_K2: 1,
     LlmModel.QWEN3_235B_A22B_THINKING: 1,
     LlmModel.QWEN3_CODER: 9,
-    LlmModel.GEMINI_2_5_FLASH: 1,
-    LlmModel.GEMINI_2_0_FLASH: 1,
-    LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
-    LlmModel.GEMINI_2_0_FLASH_LITE: 1,
-    LlmModel.DEEPSEEK_R1_0528: 1,
     # v0 by Vercel models
     LlmModel.V0_1_5_MD: 1,
     LlmModel.V0_1_5_LG: 2,
diff --git a/autogpt_platform/frontend/public/integrations/amazon.png b/autogpt_platform/frontend/public/integrations/amazon.png
new file mode 100644
index 0000000000..4e8605abef
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/amazon.png differ
diff --git a/autogpt_platform/frontend/public/integrations/anthropic-color.png b/autogpt_platform/frontend/public/integrations/anthropic-color.png
new file mode 100644
index 0000000000..dfc85ecaa3
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/anthropic-color.png differ
diff --git a/autogpt_platform/frontend/public/integrations/cohere.png b/autogpt_platform/frontend/public/integrations/cohere.png
new file mode 100644
index 0000000000..3da0b83737
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/cohere.png differ
diff --git a/autogpt_platform/frontend/public/integrations/deepseek.png b/autogpt_platform/frontend/public/integrations/deepseek.png
new file mode 100644
index 0000000000..b11d0a9d86
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/deepseek.png differ
diff --git a/autogpt_platform/frontend/public/integrations/gemini.png b/autogpt_platform/frontend/public/integrations/gemini.png
new file mode 100644
index 0000000000..cd78455bc5
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/gemini.png differ
diff --git a/autogpt_platform/frontend/public/integrations/gryphe.png b/autogpt_platform/frontend/public/integrations/gryphe.png
new file mode 100644
index 0000000000..13d87e1d9d
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/gryphe.png differ
diff --git a/autogpt_platform/frontend/public/integrations/microsoft.webp b/autogpt_platform/frontend/public/integrations/microsoft.webp
new file mode 100644
index 0000000000..c416883b2a
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/microsoft.webp differ
diff --git a/autogpt_platform/frontend/public/integrations/mistral.png b/autogpt_platform/frontend/public/integrations/mistral.png
new file mode 100644
index 0000000000..a7169adcc8
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/mistral.png differ
diff --git a/autogpt_platform/frontend/public/integrations/moonshot.png b/autogpt_platform/frontend/public/integrations/moonshot.png
new file mode 100644
index 0000000000..01c7d5694a
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/moonshot.png differ
diff --git a/autogpt_platform/frontend/public/integrations/nousresearch.avif b/autogpt_platform/frontend/public/integrations/nousresearch.avif
new file mode 100644
index 0000000000..e55a0626b6
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/nousresearch.avif differ
diff --git a/autogpt_platform/frontend/public/integrations/perplexity.webp b/autogpt_platform/frontend/public/integrations/perplexity.webp
new file mode 100644
index 0000000000..29515cc066
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/perplexity.webp differ
diff --git a/autogpt_platform/frontend/public/integrations/qwen.png b/autogpt_platform/frontend/public/integrations/qwen.png
new file mode 100644
index 0000000000..e6fba24e8d
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/qwen.png differ
diff --git a/autogpt_platform/frontend/public/integrations/xai.webp b/autogpt_platform/frontend/public/integrations/xai.webp
new file mode 100644
index 0000000000..fe6050b103
Binary files /dev/null and b/autogpt_platform/frontend/public/integrations/xai.webp differ
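These static assets back the creator icons in the picker below. `LlmIcon` normalizes the creator string before indexing its icon map; a small sketch of that normalization, using the same regex as the component and creator names from the backend table:

```ts
// Same normalization LlmIcon applies: trim, lowercase, strip all whitespace.
const normalizeCreator = (value: string) =>
  value.trim().toLowerCase().replace(/\s+/g, "");

normalizeCreator("Mistral AI"); // "mistralai"  -> /integrations/mistral.png
normalizeCreator("Nous Research"); // "nousresearch" -> /integrations/nousresearch.avif
normalizeCreator("Moonshot AI"); // "moonshotai" -> /integrations/moonshot.png
```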
diff --git a/autogpt_platform/frontend/src/components/molecules/Popover/Popover.tsx b/autogpt_platform/frontend/src/components/molecules/Popover/Popover.tsx
new file mode 100644
index 0000000000..f6bb16d54a
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/molecules/Popover/Popover.tsx
@@ -0,0 +1,33 @@
+"use client";
+
+import * as PopoverPrimitive from "@radix-ui/react-popover";
+import * as React from "react";
+
+import { cn } from "@/lib/utils";
+
+const Popover = PopoverPrimitive.Root;
+
+const PopoverTrigger = PopoverPrimitive.Trigger;
+
+const PopoverAnchor = PopoverPrimitive.Anchor;
+
+const PopoverContent = React.forwardRef<
+  React.ElementRef<typeof PopoverPrimitive.Content>,
+  React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>
+>(({ className, align = "center", sideOffset = 4, ...props }, ref) => (
+  <PopoverPrimitive.Portal>
+    <PopoverPrimitive.Content
+      ref={ref}
+      align={align}
+      sideOffset={sideOffset}
+      className={cn(
+        "z-50 rounded-md border bg-white shadow-md outline-none",
+        className,
+      )}
+      {...props}
+    />
+  </PopoverPrimitive.Portal>
+));
+PopoverContent.displayName = PopoverPrimitive.Content.displayName;
+
+export { Popover, PopoverAnchor, PopoverContent, PopoverTrigger };
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/LlmModelField.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/LlmModelField.tsx
new file mode 100644
index 0000000000..c2057fdad0
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/LlmModelField.tsx
@@ -0,0 +1,92 @@
+"use client";
+
+import {
+  descriptionId,
+  FieldProps,
+  getTemplate,
+  RJSFSchema,
+  titleId,
+} from "@rjsf/utils";
+import { useMemo } from "react";
+import { LlmModelPicker } from "./components/LlmModelPicker";
+import { LlmModelMetadataMap } from "./types";
+import { updateUiOption } from "../../helpers";
+
+type LlmModelSchema = RJSFSchema & {
+  llm_model_metadata?: LlmModelMetadataMap;
+};
+
+export function LlmModelField(props: FieldProps) {
+  const { schema, formData, onChange, disabled, readonly, fieldPathId } = props;
+
+  const metadata = useMemo(() => {
+    return (schema as LlmModelSchema)?.llm_model_metadata ?? {};
+  }, [schema]);
+
+  const models = useMemo(() => {
+    return Object.values(metadata);
+  }, [metadata]);
+
+  const selectedName =
+    typeof formData === "string"
+      ? formData
+      : typeof schema.default === "string"
+        ? schema.default
+        : "";
+
+  const selectedModel = selectedName
+    ? (metadata[selectedName] ??
+      models.find((model) => model.name === selectedName))
+    : undefined;
+
+  const recommendedName =
+    typeof schema.default === "string" ? schema.default : models[0]?.name;
+
+  const recommendedModel =
+    recommendedName && metadata[recommendedName]
+      ? metadata[recommendedName]
+      : undefined;
+
+  if (models.length === 0) {
+    return null;
+  }
+
+  const TitleFieldTemplate = getTemplate("TitleFieldTemplate", props.registry);
+  const DescriptionFieldTemplate = getTemplate(
+    "DescriptionFieldTemplate",
+    props.registry,
+  );
+
+  const updatedUiSchema = updateUiOption(props.uiSchema, {
+    showHandles: false,
+  });
+
+  return (
+    <>
+      <TitleFieldTemplate
+        id={titleId(fieldPathId)}
+        title={schema.title}
+        schema={schema}
+        uiSchema={updatedUiSchema}
+        registry={props.registry}
+      />
+      <DescriptionFieldTemplate
+        id={descriptionId(fieldPathId)}
+        description={schema.description}
+        schema={schema}
+        registry={props.registry}
+      />
+      <LlmModelPicker
+        models={models}
+        selectedModel={selectedModel}
+        recommendedModel={recommendedModel}
+        onSelect={(value) => onChange(value, fieldPathId?.path)}
+        disabled={disabled || readonly}
+      />
+    </>
+  );
+}
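For context, this field only renders when a schema carries the `llm_model` flag (see the custom-registry change at the end of this diff). A hand-written, illustrative schema fragment that would route here; the metadata entry mirrors the backend-emitted shape:

```ts
// Hypothetical field schema for illustration: llm_model is the routing flag,
// llm_model_metadata is the map LlmModelField reads its options from.
const llmModelFieldSchema = {
  type: "string",
  default: "v0-1.5-lg",
  llm_model: true,
  llm_model_metadata: {
    "v0-1.5-lg": {
      creator: "V0",
      creator_name: "V0",
      title: "v0 1.5 LG",
      provider: "v0",
      provider_name: "V0",
      name: "v0-1.5-lg",
      price_tier: 1,
    },
  },
};
```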
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmIcon.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmIcon.tsx
new file mode 100644
index 0000000000..cad91a3dde
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmIcon.tsx
@@ -0,0 +1,66 @@
+"use client";
+
+import Image from "next/image";
+import { Text } from "@/components/atoms/Text/Text";
+
+const creatorIconMap: Record<string, string> = {
+  anthropic: "/integrations/anthropic-color.png",
+  openai: "/integrations/openai.png",
+  google: "/integrations/gemini.png",
+  nvidia: "/integrations/nvidia.png",
+  groq: "/integrations/groq.png",
+  ollama: "/integrations/ollama.png",
+  openrouter: "/integrations/open_router.png",
+  v0: "/integrations/v0.png",
+  xai: "/integrations/xai.webp",
+  meta: "/integrations/llama_api.png",
+  amazon: "/integrations/amazon.png",
+  cohere: "/integrations/cohere.png",
+  deepseek: "/integrations/deepseek.png",
+  gryphe: "/integrations/gryphe.png",
+  microsoft: "/integrations/microsoft.webp",
+  moonshotai: "/integrations/moonshot.png",
+  mistral: "/integrations/mistral.png",
+  mistralai: "/integrations/mistral.png",
+  nousresearch: "/integrations/nousresearch.avif",
+  perplexity: "/integrations/perplexity.webp",
+  qwen: "/integrations/qwen.png",
+};
+
+type Props = {
+  value: string;
+  size?: number;
+};
+
+export function LlmIcon({ value, size = 20 }: Props) {
+  const normalized = value.trim().toLowerCase().replace(/\s+/g, "");
+  const src = creatorIconMap[normalized];
+  if (src) {
+    return (
+      <Image
+        src={src}
+        alt={value}
+        width={size}
+        height={size}
+        className="rounded-sm object-contain"
+      />
+    );
+  }
+
+  const fallback = value?.trim().slice(0, 1).toUpperCase() || "?";
+  return (
+    <div
+      className="flex items-center justify-center rounded-sm bg-zinc-100"
+      style={{ width: size, height: size }}
+    >
+      <Text variant="small">
+        {fallback}
+      </Text>
+    </div>
+  );
+}
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmMenuHeader.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmMenuHeader.tsx
new file mode 100644
index 0000000000..40bce8dafc
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmMenuHeader.tsx
@@ -0,0 +1,24 @@
+"use client";
+
+import { ArrowLeftIcon } from "@phosphor-icons/react";
+import { Text } from "@/components/atoms/Text/Text";
+
+type Props = {
+  label: string;
+  onBack: () => void;
+};
+
+export function LlmMenuHeader({ label, onBack }: Props) {
+  return (
+    <button
+      type="button"
+      onClick={onBack}
+      className="flex w-full items-center gap-2 px-3 py-2"
+    >
+      <ArrowLeftIcon size={16} />
+      <Text variant="small">
+        {label}
+      </Text>
+    </button>
+  );
+}
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmMenuItem.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmMenuItem.tsx
new file mode 100644
index 0000000000..02f8eb968d
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmMenuItem.tsx
@@ -0,0 +1,61 @@
+"use client";
+
+import { CaretRightIcon, CheckIcon } from "@phosphor-icons/react";
+import { Text } from "@/components/atoms/Text/Text";
+import { cn } from "@/lib/utils";
+
+type Props = {
+  title: string;
+  subtitle?: string;
+  icon?: React.ReactNode;
+  showChevron?: boolean;
+  rightSlot?: React.ReactNode;
+  onClick: () => void;
+  isActive?: boolean;
+};
+
+export function LlmMenuItem({
+  title,
+  subtitle,
+  icon,
+  showChevron,
+  rightSlot,
+  onClick,
+  isActive,
+}: Props) {
+  const hasIcon = Boolean(icon);
+
+  return (
+    <button
+      type="button"
+      onClick={onClick}
+      className={cn(
+        "flex w-full items-center gap-2 rounded px-3 py-2 text-left",
+        isActive && "bg-zinc-100",
+      )}
+    >
+      {hasIcon && icon}
+      <div className="min-w-0 flex-1">
+        <Text variant="small" className="truncate">
+          {title}
+        </Text>
+        {subtitle && (
+          <Text variant="small" className="text-zinc-500">
+            {subtitle}
+          </Text>
+        )}
+      </div>
+      {rightSlot}
+      {isActive && (
+        <CheckIcon size={16} />
+      )}
+      {showChevron && (
+        <CaretRightIcon size={16} />
+      )}
+    </button>
+  );
+}
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmModelPicker.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmModelPicker.tsx
new file mode 100644
index 0000000000..c107120f01
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmModelPicker.tsx
@@ -0,0 +1,235 @@
+"use client";
+
+import { useCallback, useEffect, useMemo, useState } from "react";
+import { CaretDownIcon } from "@phosphor-icons/react";
+import {
+  Popover,
+  PopoverContent,
+  PopoverTrigger,
+} from "@/components/molecules/Popover/Popover";
+import { Text } from "@/components/atoms/Text/Text";
+import { cn } from "@/lib/utils";
+import {
+  getCreatorDisplayName,
+  getModelDisplayName,
+  getProviderDisplayName,
+  groupByCreator,
+  groupByTitle,
+} from "../helpers";
+import { LlmModelMetadata } from "../types";
+import { LlmIcon } from "./LlmIcon";
+import { LlmMenuHeader } from "./LlmMenuHeader";
+import { LlmMenuItem } from "./LlmMenuItem";
+import { LlmPriceTier } from "./LlmPriceTier";
+
+type MenuView = "creator" | "model" | "provider";
+
+type Props = {
+  models: LlmModelMetadata[];
+  selectedModel?: LlmModelMetadata;
+  recommendedModel?: LlmModelMetadata;
+  onSelect: (value: string) => void;
+  disabled?: boolean;
+};
+
+export function LlmModelPicker({
+  models,
+  selectedModel,
+  recommendedModel,
+  onSelect,
+  disabled,
+}: Props) {
+  const [open, setOpen] = useState(false);
+  const [view, setView] = useState<MenuView>("creator");
+  const [activeCreator, setActiveCreator] = useState<string | null>(null);
+  const [activeTitle, setActiveTitle] = useState<string | null>(null);
+
+  const modelsByCreator = useMemo(() => groupByCreator(models), [models]);
+
+  const creators = useMemo(() => {
+    return Array.from(modelsByCreator.keys()).sort((a, b) =>
+      a.localeCompare(b),
+    );
+  }, [modelsByCreator]);
+
+  const creatorIconValues = useMemo(() => {
+    const map = new Map<string, string>();
+    for (const [creator, entries] of modelsByCreator.entries()) {
+      map.set(creator, entries[0]?.creator ?? creator);
+    }
+    return map;
+  }, [modelsByCreator]);
+
+  useEffect(() => {
+    if (!open) {
+      return;
+    }
+    setView("creator");
+    setActiveCreator(
+      selectedModel
+        ? getCreatorDisplayName(selectedModel)
+        : (creators[0] ?? null),
+    );
+    setActiveTitle(selectedModel ? getModelDisplayName(selectedModel) : null);
+  }, [open, selectedModel, creators]);
+
+  const currentCreator = activeCreator ?? creators[0] ?? null;
+
+  const currentModels = useMemo(() => {
+    return currentCreator ? (modelsByCreator.get(currentCreator) ?? []) : [];
+  }, [currentCreator, modelsByCreator]);
+
+  const currentCreatorIcon = useMemo(() => {
+    return currentModels[0]?.creator ?? currentCreator;
+  }, [currentModels, currentCreator]);
+
+  const modelsByTitle = useMemo(
+    () => groupByTitle(currentModels),
+    [currentModels],
+  );
+
+  const modelEntries = useMemo(() => {
+    return Array.from(modelsByTitle.entries())
+      .map(([title, entries]) => {
+        const providers = new Set(entries.map((entry) => entry.provider));
+        return {
+          title,
+          entries,
+          providerCount: providers.size,
+        };
+      })
+      .sort((a, b) => a.title.localeCompare(b.title));
+  }, [modelsByTitle]);
+
+  const providerEntries = useMemo(() => {
+    if (!activeTitle) {
+      return [];
+    }
+    return modelsByTitle.get(activeTitle) ?? [];
+  }, [activeTitle, modelsByTitle]);
+
+  const handleSelectModel = useCallback(
+    (modelName: string) => {
+      onSelect(modelName);
+      setOpen(false);
+    },
+    [onSelect],
+  );
+
+  const triggerModel = selectedModel ?? recommendedModel ?? models[0];
+  const triggerTitle = triggerModel
+    ? getModelDisplayName(triggerModel)
+    : "Select model";
+  const triggerCreator = triggerModel?.creator ?? "";
+
+  return (
+    <Popover open={open} onOpenChange={setOpen}>
+      <PopoverTrigger asChild disabled={disabled}>
+        <button
+          type="button"
+          className={cn(
+            "flex w-full items-center gap-2 rounded border px-3 py-2",
+            disabled && "cursor-not-allowed opacity-50",
+          )}
+        >
+          <LlmIcon value={triggerCreator} size={16} />
+          <Text variant="small" className="min-w-0 flex-1 truncate text-left">
+            {triggerTitle}
+          </Text>
+          <CaretDownIcon size={12} />
+        </button>
+      </PopoverTrigger>
+      <PopoverContent align="start" className="w-80 p-1">
+        {view === "creator" && (
+          <div className="flex max-h-80 flex-col overflow-y-auto">
+            {recommendedModel && (
+              <>
+                <LlmMenuItem
+                  title={getModelDisplayName(recommendedModel)}
+                  subtitle="Recommended"
+                  icon={
+                    <LlmIcon value={recommendedModel.creator} />
+                  }
+                  onClick={() => handleSelectModel(recommendedModel.name)}
+                />
+                <div className="my-1 h-px bg-zinc-100" />
+              </>
+            )}
+            {creators.map((creator) => (
+              <LlmMenuItem
+                key={creator}
+                title={creator}
+                icon={
+                  <LlmIcon value={creatorIconValues.get(creator) ?? creator} />
+                }
+                showChevron={true}
+                isActive={
+                  selectedModel
+                    ? getCreatorDisplayName(selectedModel) === creator
+                    : false
+                }
+                onClick={() => {
+                  setActiveCreator(creator);
+                  setView("model");
+                }}
+              />
+            ))}
+          </div>
+        )}
+        {view === "model" && currentCreator && (
+          <div className="flex max-h-80 flex-col overflow-y-auto">
+            <LlmMenuHeader
+              label={currentCreator}
+              onBack={() => setView("creator")}
+            />
+            {modelEntries.map((entry) => (
+              <LlmMenuItem
+                key={entry.title}
+                title={entry.title}
+                icon={
+                  <LlmIcon value={currentCreatorIcon ?? ""} />
+                }
+                rightSlot={<LlmPriceTier tier={entry.entries[0]?.price_tier} />}
+                showChevron={entry.providerCount > 1}
+                isActive={
+                  selectedModel
+                    ? getModelDisplayName(selectedModel) === entry.title
+                    : false
+                }
+                onClick={() => {
+                  if (entry.providerCount > 1) {
+                    setActiveTitle(entry.title);
+                    setView("provider");
+                    return;
+                  }
+                  handleSelectModel(entry.entries[0].name);
+                }}
+              />
+            ))}
+          </div>
+        )}
+        {view === "provider" && activeTitle && (
+          <div className="flex max-h-80 flex-col overflow-y-auto">
+            <LlmMenuHeader
+              label={activeTitle}
+              onBack={() => setView("model")}
+            />
+            {providerEntries.map((entry) => (
+              <LlmMenuItem
+                key={entry.name}
+                title={getProviderDisplayName(entry)}
+                icon={
+                  <LlmIcon value={getProviderDisplayName(entry)} />
+                }
+                isActive={selectedModel?.provider === entry.provider}
+                onClick={() => handleSelectModel(entry.name)}
+              />
+            ))}
+          </div>
+        )}
+      </PopoverContent>
+    </Popover>
+  );
+}
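The three views (creator, then model, then provider) are driven entirely by the grouping helpers defined later in this diff. A quick sketch of how grouping behaves on real entries from the backend table; the `name` and `title` values below are taken verbatim from `MODEL_METADATA`:

```ts
import { groupByCreator, groupByTitle } from "./helpers";
import type { LlmModelMetadata } from "./types";

// The three v0 models share one creator, so the creator view shows a single
// "V0" row and the model view lists three titles under it.
const v0Models: LlmModelMetadata[] = [
  { creator: "V0", creator_name: "V0", title: "v0 1.5 MD", provider: "v0", provider_name: "V0", name: "v0-1.5-md", price_tier: 1 },
  { creator: "V0", creator_name: "V0", title: "v0 1.5 LG", provider: "v0", provider_name: "V0", name: "v0-1.5-lg", price_tier: 1 },
  { creator: "V0", creator_name: "V0", title: "v0 1.0 MD", provider: "v0", provider_name: "V0", name: "v0-1.0-md", price_tier: 1 },
];

groupByCreator(v0Models); // Map { "V0" -> [all three] }: one creator row
groupByTitle(v0Models); // three single-entry titles: clicking selects directly
// Only when one title groups entries from more than one provider does the
// picker push the third, provider-level view (providerCount > 1 above).
```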
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmPriceTier.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmPriceTier.tsx
new file mode 100644
index 0000000000..2913c5dff0
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/components/LlmPriceTier.tsx
@@ -0,0 +1,25 @@
+"use client";
+
+import { CurrencyDollarSimpleIcon } from "@phosphor-icons/react";
+
+type Props = {
+  tier?: number;
+};
+
+export function LlmPriceTier({ tier }: Props) {
+  if (!tier || tier <= 0) {
+    return null;
+  }
+  const clamped = Math.min(3, Math.max(1, tier));
+  return (
+    <div className="flex items-center">
+      {Array.from({ length: clamped }).map((_, index) => (
+        <CurrencyDollarSimpleIcon
+          key={index}
+          size={12}
+        />
+      ))}
+    </div>
+  );
+}
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/helpers.ts b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/helpers.ts
new file mode 100644
index 0000000000..bb8619c25f
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/helpers.ts
@@ -0,0 +1,35 @@
+import { LlmModelMetadata } from "./types";
+
+export function groupByCreator(models: LlmModelMetadata[]) {
+  const map = new Map<string, LlmModelMetadata[]>();
+  for (const model of models) {
+    const key = getCreatorDisplayName(model);
+    const existing = map.get(key) ?? [];
+    existing.push(model);
+    map.set(key, existing);
+  }
+  return map;
+}
+
+export function groupByTitle(models: LlmModelMetadata[]) {
+  const map = new Map<string, LlmModelMetadata[]>();
+  for (const model of models) {
+    const displayName = getModelDisplayName(model);
+    const existing = map.get(displayName) ?? [];
+    existing.push(model);
+    map.set(displayName, existing);
+  }
+  return map;
+}
+
+export function getCreatorDisplayName(model: LlmModelMetadata): string {
+  return model.creator_name || model.creator || "";
+}
+
+export function getModelDisplayName(model: LlmModelMetadata): string {
+  return model.title || model.name || "";
+}
+
+export function getProviderDisplayName(model: LlmModelMetadata): string {
+  return model.provider_name || model.provider || "";
+}
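All three display-name helpers share the same "pretty name, else raw id" fallback, which keeps the picker usable even if the backend ever omits a display field:

```ts
import {
  getCreatorDisplayName,
  getModelDisplayName,
  getProviderDisplayName,
} from "./helpers";
import type { LlmModelMetadata } from "./types";

// Entry with the optional display fields blanked out, to show the fallbacks.
const entry: LlmModelMetadata = {
  creator: "V0", creator_name: "",
  title: "", name: "v0-1.5-lg",
  provider: "v0", provider_name: "",
  price_tier: 1,
};

getCreatorDisplayName(entry); // "V0": creator_name empty, falls back to creator
getModelDisplayName(entry); // "v0-1.5-lg": title empty, falls back to name
getProviderDisplayName(entry); // "v0": provider_name empty, falls back to provider
```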
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/types.ts b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/types.ts
new file mode 100644
index 0000000000..39389b2535
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/LlmModelField/types.ts
@@ -0,0 +1,11 @@
+export type LlmModelMetadata = {
+  creator: string;
+  creator_name: string;
+  title: string;
+  provider: string;
+  provider_name: string;
+  name: string;
+  price_tier?: number;
+};
+
+export type LlmModelMetadataMap = Record<string, LlmModelMetadata>;
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/custom-registry.ts b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/custom-registry.ts
index 257a98ad93..74000ec60e 100644
--- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/custom-registry.ts
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/custom/custom-registry.ts
@@ -8,6 +8,7 @@ import {
   isMultiSelectSchema,
 } from "../utils/schema-utils";
 import { TableField } from "./TableField/TableField";
+import { LlmModelField } from "./LlmModelField/LlmModelField";
 
 export interface CustomFieldDefinition {
   id: string;
@@ -57,6 +58,15 @@ export const CUSTOM_FIELDS: CustomFieldDefinition[] = [
     },
     component: TableField,
   },
+  {
+    id: "custom/llm_model_field",
+    matcher: (schema: any) => {
+      return (
+        typeof schema === "object" && schema !== null && "llm_model" in schema
+      );
+    },
+    component: LlmModelField,
+  },
 ];
 
 export function findCustomFieldId(schema: any): string | null {
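A quick sketch of the new matcher's behavior, assuming `findCustomFieldId` walks `CUSTOM_FIELDS` in order and returns the first matching id:

```ts
import { findCustomFieldId } from "./custom-registry";

// Any object schema carrying the llm_model flag resolves to the new field.
findCustomFieldId({ llm_model: true, llm_model_metadata: {} });
// -> "custom/llm_model_field"

// Schemas without the flag fall through to the existing matchers.
findCustomFieldId({ type: "string" });
// -> null, assuming no earlier matcher claims a bare string schema
```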