# gpt-pilot/pilot/.env.example
# OPENAI or AZURE or OPENROUTER (ignored for Anthropic)
ENDPOINT=OPENAI
# OPENAI_ENDPOINT=https://api.openai.com/v1/chat/completions
OPENAI_ENDPOINT=
OPENAI_API_KEY=
AZURE_API_KEY=
AZURE_ENDPOINT=
OPENROUTER_API_KEY=
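# Example OpenRouter setup (illustrative placeholder values only; also set MODEL_NAME below to a model available on OpenRouter):
# ENDPOINT=OPENROUTER
# OPENROUTER_API_KEY=sk-or-...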
# Set this to use Anthropic API directly
# If using via OpenRouter, OPENROUTER_API_KEY should be set instead
ANTHROPIC_API_KEY=
# You only need to set this if not using the Anthropic API directly (e.g. via a proxy or AWS Bedrock)
# ANTHROPIC_ENDPOINT=
# For an Azure or OpenRouter endpoint, change this to your deployed model name
MODEL_NAME=gpt-4-turbo-preview
# For Anthropic, prefix the model name with "anthropic/", for example for Claude 3 Opus:
# MODEL_NAME=anthropic/claude-3-opus-20240229
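# Example Azure setup (illustrative placeholders only; the exact endpoint URL format depends on your Azure OpenAI resource):
# ENDPOINT=AZURE
# AZURE_API_KEY=<your key>
# AZURE_ENDPOINT=https://<your-resource>.openai.azure.com
# MODEL_NAME=<your deployment name>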
MAX_TOKENS=8192
# Folders that shouldn't be tracked in the workspace (useful for ignoring folders created by the compiler)
# IGNORE_PATHS=folder1,folder2
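# Example (hypothetical folder names):
# IGNORE_PATHS=node_modules,dist,.venv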
# Database
# DATABASE_TYPE=postgres
DB_NAME=gpt-pilot
DB_HOST=
DB_PORT=
DB_USER=
DB_PASSWORD=
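# Example local PostgreSQL configuration (illustrative values only; use your own credentials):
# DATABASE_TYPE=postgres
# DB_NAME=gpt-pilot
# DB_HOST=localhost
# DB_PORT=5432
# DB_USER=postgres
# DB_PASSWORD=changeme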
# USE_GPTPILOT_FOLDER=true
# Automatically fix file paths when loading a database imported from another location/system - EXPERIMENTAL
# AUTOFIX_FILE_PATHS=false
# Extra buffer to wait, on top of the detected retry time, when a rate limit is hit. Defaults to 6.
# RATE_LIMIT_EXTRA_BUFFER=
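# Example (hypothetical value): RATE_LIMIT_EXTRA_BUFFER=10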
# Only send task-relevant files to the LLM. Enabled by default; uncomment and set this to "false" to disable.
# FILTER_RELEVANT_FILES=true