Mirror of https://github.com/Significant-Gravitas/AutoGPT.git — synced 2026-04-08 03:00:28 -04:00
feat(backend): add support for v0 by Vercel models and credentials (#10641)
## Summary

This PR adds support for v0 by Vercel's Model API to the AutoGPT platform, enabling users to leverage v0's framework-aware AI models optimized for React and Next.js code generation. v0 provides OpenAI-compatible endpoints with models specifically trained for frontend development, making them ideal for generating UI components and web applications.

### Changes 🏗️

#### Backend Changes

- **Added v0 Provider**: Added `V0 = "v0"` to `ProviderName` enum in `/backend/backend/integrations/providers.py`
- **Added v0 Models**: Added three v0 models to `LlmModel` enum in `/backend/backend/blocks/llm.py`:
  - `V0_1_5_MD = "v0-1.5-md"` - Everyday tasks and UI generation (128K context, 64K output)
  - `V0_1_5_LG = "v0-1.5-lg"` - Advanced reasoning (512K context, 64K output)
  - `V0_1_0_MD = "v0-1.0-md"` - Legacy model (128K context, 64K output)
- **Implemented v0 Provider**: Added v0 support in the `llm_call()` function using an OpenAI-compatible client with base URL `https://api.v0.dev/v1`
- **Added Credentials Support**: Created `v0_credentials` in `/backend/backend/integrations/credentials_store.py` with UUID `c4e6d1a0-3b5f-4789-a8e2-9b123456789f`
- **Cost Configuration**: Added model costs in `/backend/backend/data/block_cost_config.py`:
  - v0-1.5-md: 1 credit
  - v0-1.5-lg: 2 credits
  - v0-1.0-md: 1 credit

#### Configuration Changes

- **Settings**: Added `v0_api_key` field to the `Secrets` class in `/backend/backend/util/settings.py`
- **Environment Variables**: Added `V0_API_KEY=` to `/backend/.env.default`

### Features

- ✅ Full OpenAI-compatible API support
- ✅ Tool/function calling support
- ✅ JSON response format support
- ✅ Framework-aware completions optimized for React/Next.js
- ✅ Large context windows (up to 512K tokens)
- ✅ Integrated with platform credit system

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Run existing block tests to ensure no regressions: `poetry run pytest backend/blocks/test/test_block.py`
  - [x] Verify AITextGeneratorBlock works with v0 models
  - [x] Confirm all model metadata is correctly configured
  - [x] Validate cost configuration is properly set up
  - [x] Check that v0_credentials has a valid UUID4

#### For configuration changes:

- [x] `.env.example` is updated or already compatible with my changes
  - Added `V0_API_KEY=` to `/backend/.env.default`
- [x] `docker-compose.yml` is updated or already compatible with my changes
  - No changes needed — uses existing environment variable patterns
- [x] I have included a list of my configuration changes in the PR description (under **Changes**)

### Configuration Requirements

Users need to:

1. Obtain a v0 API key from [v0.app](https://v0.app) (requires Premium or Team plan)
2. Add `V0_API_KEY=your-api-key` to their `.env` file

### API Documentation

- v0 API Docs: https://v0.app/docs/api
- Model API Docs: https://v0.app/docs/api/model

### Testing

All existing tests pass with the new v0 integration:

```bash
poetry run pytest backend/blocks/test/test_block.py::test_available_blocks -k "AITextGeneratorBlock" -xvs
# Result: PASSED
```
This commit is contained in:
@@ -55,6 +55,7 @@ ANTHROPIC_API_KEY=
 GROQ_API_KEY=
 LLAMA_API_KEY=
 AIML_API_KEY=
+V0_API_KEY=
 OPEN_ROUTER_API_KEY=
 NVIDIA_API_KEY=
 
@@ -37,6 +37,7 @@ LLMProviderName = Literal[
     ProviderName.OPENAI,
     ProviderName.OPEN_ROUTER,
     ProviderName.LLAMA_API,
+    ProviderName.V0,
 ]
 
 AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]]
@@ -155,6 +156,10 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
     LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
     LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
+    # v0 by Vercel models
+    V0_1_5_MD = "v0-1.5-md"
+    V0_1_5_LG = "v0-1.5-lg"
+    V0_1_0_MD = "v0-1.0-md"
 
     @property
     def metadata(self) -> ModelMetadata:
@@ -280,6 +285,10 @@ MODEL_METADATA = {
     LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
     LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
     LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
+    # v0 by Vercel models
+    LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
+    LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
+    LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
 }
 
 for model in LlmModel:
@@ -700,6 +709,42 @@ async def llm_call(
             ),
             reasoning=None,
         )
+    elif provider == "v0":
+        tools_param = tools if tools else openai.NOT_GIVEN
+        client = openai.AsyncOpenAI(
+            base_url="https://api.v0.dev/v1",
+            api_key=credentials.api_key.get_secret_value(),
+        )
+
+        response_format = None
+        if json_format:
+            response_format = {"type": "json_object"}
+
+        parallel_tool_calls_param = get_parallel_tool_calls_param(
+            llm_model, parallel_tool_calls
+        )
+
+        response = await client.chat.completions.create(
+            model=llm_model.value,
+            messages=prompt,  # type: ignore
+            response_format=response_format,  # type: ignore
+            max_tokens=max_tokens,
+            tools=tools_param,  # type: ignore
+            parallel_tool_calls=parallel_tool_calls_param,
+        )
+
+        tool_calls = extract_openai_tool_calls(response)
+        reasoning = extract_openai_reasoning(response)
+
+        return LLMResponse(
+            raw_response=response.choices[0].message,
+            prompt=prompt,
+            response=response.choices[0].message.content or "",
+            tool_calls=tool_calls,
+            prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
+            completion_tokens=response.usage.completion_tokens if response.usage else 0,
+            reasoning=reasoning,
+        )
     else:
         raise ValueError(f"Unsupported LLM provider: {provider}")
 
@@ -46,6 +46,7 @@ from backend.integrations.credentials_store import (
     replicate_credentials,
     revid_credentials,
     unreal_credentials,
+    v0_credentials,
 )
 
 # =============== Configure the cost for each LLM Model call =============== #
@@ -122,6 +123,10 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
     LlmModel.GEMINI_2_0_FLASH_LITE: 1,
     LlmModel.DEEPSEEK_R1_0528: 1,
+    # v0 by Vercel models
+    LlmModel.V0_1_5_MD: 1,
+    LlmModel.V0_1_5_LG: 2,
+    LlmModel.V0_1_0_MD: 1,
 }
 
 for model in LlmModel:
@@ -211,6 +216,23 @@ LLM_COST = (
         for model, cost in MODEL_COST.items()
         if MODEL_METADATA[model].provider == "llama_api"
     ]
+    # v0 by Vercel Models
+    + [
+        BlockCost(
+            cost_type=BlockCostType.RUN,
+            cost_filter={
+                "model": model,
+                "credentials": {
+                    "id": v0_credentials.id,
+                    "provider": v0_credentials.provider,
+                    "type": v0_credentials.type,
+                },
+            },
+            cost_amount=cost,
+        )
+        for model, cost in MODEL_COST.items()
+        if MODEL_METADATA[model].provider == "v0"
+    ]
     # AI/ML Api Models
     + [
         BlockCost(
@@ -199,6 +199,14 @@ llama_api_credentials = APIKeyCredentials(
     expires_at=None,
 )
 
+v0_credentials = APIKeyCredentials(
+    id="c4e6d1a0-3b5f-4789-a8e2-9b123456789f",
+    provider="v0",
+    api_key=SecretStr(settings.secrets.v0_api_key),
+    title="Use Credits for v0 by Vercel",
+    expires_at=None,
+)
+
 DEFAULT_CREDENTIALS = [
     ollama_credentials,
     revid_credentials,
@@ -223,6 +231,8 @@ DEFAULT_CREDENTIALS = [
     smartlead_credentials,
     zerobounce_credentials,
     google_maps_credentials,
     llama_api_credentials,
+    v0_credentials,
 ]
 
@@ -48,6 +48,7 @@ class ProviderName(str, Enum):
     TWITTER = "twitter"
     TODOIST = "todoist"
     UNREAL_SPEECH = "unreal_speech"
+    V0 = "v0"
     ZEROBOUNCE = "zerobounce"
 
     @classmethod
@@ -472,6 +472,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     groq_api_key: str = Field(default="", description="Groq API key")
     open_router_api_key: str = Field(default="", description="Open Router API Key")
     llama_api_key: str = Field(default="", description="Llama API Key")
+    v0_api_key: str = Field(default="", description="v0 by Vercel API key")
 
     reddit_client_id: str = Field(default="", description="Reddit client ID")
     reddit_client_secret: str = Field(default="", description="Reddit client secret")
Reference in New Issue
Block a user