From 1d653973e9be82f47fdeebe949f0c37852c5f2dd Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Mon, 12 Feb 2024 13:19:37 +0100
Subject: [PATCH] feat(agent/llm): Use new OpenAI models as default
 `SMART_LLM`, `FAST_LLM`, and `EMBEDDING_MODEL`

- Change default `SMART_LLM` from `gpt-4` to `gpt-4-turbo-preview`
- Change default `FAST_LLM` from `gpt-3.5-turbo-16k` to `gpt-3.5-turbo-0125`
- Change default `EMBEDDING_MODEL` from `text-embedding-ada-002` to `text-embedding-3-small`
- Update .env.template, azure.yaml.template, and documentation accordingly
---
 autogpts/autogpt/.env.template                | 10 +++++-----
 autogpts/autogpt/autogpt/config/config.py     |  6 +++---
 autogpts/autogpt/azure.yaml.template          |  6 +++---
 docs/content/AutoGPT/configuration/options.md |  6 +++---
 docs/content/AutoGPT/setup/docker.md          |  7 ++-----
 5 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/autogpts/autogpt/.env.template b/autogpts/autogpt/.env.template
index 9ec564a5df..30fd40c69f 100644
--- a/autogpts/autogpt/.env.template
+++ b/autogpts/autogpt/.env.template
@@ -86,14 +86,14 @@ OPENAI_API_KEY=your-openai-api-key
 ### LLM MODELS
 ################################################################################
 
-## SMART_LLM - Smart language model (Default: gpt-4-0314)
-# SMART_LLM=gpt-4-0314
+## SMART_LLM - Smart language model (Default: gpt-4-turbo-preview)
+# SMART_LLM=gpt-4-turbo-preview
 
-## FAST_LLM - Fast language model (Default: gpt-3.5-turbo-16k)
-# FAST_LLM=gpt-3.5-turbo-16k
+## FAST_LLM - Fast language model (Default: gpt-3.5-turbo-0125)
+# FAST_LLM=gpt-3.5-turbo-0125
 
 ## EMBEDDING_MODEL - Model to use for creating embeddings
-# EMBEDDING_MODEL=text-embedding-ada-002
+# EMBEDDING_MODEL=text-embedding-3-small
 
 ################################################################################
 ### SHELL EXECUTION
diff --git a/autogpts/autogpt/autogpt/config/config.py b/autogpts/autogpt/autogpt/config/config.py
index ff0053a760..9d726d8701 100644
--- a/autogpts/autogpt/autogpt/config/config.py
+++ b/autogpts/autogpt/autogpt/config/config.py
@@ -80,11 +80,11 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
 
     # Model configuration
     fast_llm: str = UserConfigurable(
-        default="gpt-3.5-turbo-16k",
+        default="gpt-3.5-turbo-0125",
         from_env=lambda: os.getenv("FAST_LLM"),
     )
     smart_llm: str = UserConfigurable(
-        default="gpt-4",
+        default="gpt-4-turbo-preview",
         from_env=lambda: os.getenv("SMART_LLM"),
     )
     temperature: float = UserConfigurable(
@@ -95,7 +95,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
         default=False, from_env=lambda: os.getenv("OPENAI_FUNCTIONS", "False") == "True"
     )
     embedding_model: str = UserConfigurable(
-        default="text-embedding-ada-002", from_env="EMBEDDING_MODEL"
+        default="text-embedding-3-small", from_env="EMBEDDING_MODEL"
     )
     browse_spacy_language_model: str = UserConfigurable(
         default="en_core_web_sm", from_env="BROWSE_SPACY_LANGUAGE_MODEL"
diff --git a/autogpts/autogpt/azure.yaml.template b/autogpts/autogpt/azure.yaml.template
index d9cc2ca8e4..d05a2c3f74 100644
--- a/autogpts/autogpt/azure.yaml.template
+++ b/autogpts/autogpt/azure.yaml.template
@@ -2,6 +2,6 @@ azure_api_type: azure
 azure_api_version: api-version-for-azure
 azure_endpoint: your-azure-openai-endpoint
 azure_model_map:
-    gpt-3.5-turbo: gpt35-deployment-id-for-azure
-    gpt-4: gpt4-deployment-id-for-azure
-    text-embedding-ada-002: embedding-deployment-id-for-azure
+    gpt-3.5-turbo-0125: gpt35-deployment-id-for-azure
+    gpt-4-turbo-preview: gpt4-deployment-id-for-azure
+    text-embedding-3-small: embedding-deployment-id-for-azure
diff --git a/docs/content/AutoGPT/configuration/options.md b/docs/content/AutoGPT/configuration/options.md
index 12df23c33f..dd0291bfa3 100644
--- a/docs/content/AutoGPT/configuration/options.md
+++ b/docs/content/AutoGPT/configuration/options.md
@@ -14,10 +14,10 @@ Configuration is controlled through the `Config` object. You can set configurati
 - `DISABLED_COMMAND_CATEGORIES`: Command categories to disable. Command categories are Python module names, e.g. autogpt.commands.execute_code. See the directory `autogpt/commands` in the source for all command modules. Default: None
 - `ELEVENLABS_API_KEY`: ElevenLabs API Key. Optional.
 - `ELEVENLABS_VOICE_ID`: ElevenLabs Voice ID. Optional.
-- `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: text-embedding-ada-002
+- `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: `text-embedding-3-small`
 - `EXECUTE_LOCAL_COMMANDS`: If shell commands should be executed locally. Default: False
 - `EXIT_KEY`: Exit key accepted to exit. Default: n
-- `FAST_LLM`: LLM Model to use for most tasks. Default: gpt-3.5-turbo
+- `FAST_LLM`: LLM Model to use for most tasks. Default: `gpt-3.5-turbo-0125`
 - `GITHUB_API_KEY`: [Github API Key](https://github.com/settings/tokens). Optional.
 - `GITHUB_USERNAME`: GitHub Username. Optional.
 - `GOOGLE_API_KEY`: Google API key. Optional.
@@ -44,7 +44,7 @@ Configuration is controlled through the `Config` object. You can set configurati
 - `SHELL_ALLOWLIST`: List of shell commands that ARE allowed to be executed by AutoGPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `allowlist`. Default: None
 - `SHELL_COMMAND_CONTROL`: Whether to use `allowlist` or `denylist` to determine what shell commands can be executed (Default: denylist)
 - `SHELL_DENYLIST`: List of shell commands that ARE NOT allowed to be executed by AutoGPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `denylist`. Default: sudo,su
-- `SMART_LLM`: LLM Model to use for "smart" tasks. Default: gpt-4
+- `SMART_LLM`: LLM Model to use for "smart" tasks. Default: `gpt-4-turbo-preview`
 - `STREAMELEMENTS_VOICE`: StreamElements voice to use. Default: Brian
 - `TEMPERATURE`: Value of temperature given to OpenAI. Value from 0 to 2. Lower is more deterministic, higher is more random. See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
 - `TEXT_TO_SPEECH_PROVIDER`: Text to Speech Provider. Options are `gtts`, `macos`, `elevenlabs`, and `streamelements`. Default: gtts
diff --git a/docs/content/AutoGPT/setup/docker.md b/docs/content/AutoGPT/setup/docker.md
index 35bd7c2c1e..af4794a250 100644
--- a/docs/content/AutoGPT/setup/docker.md
+++ b/docs/content/AutoGPT/setup/docker.md
@@ -124,10 +124,7 @@ found in the [repository].
 If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and
 make an Azure configuration file:
 
-  - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
-    - `fast_llm_deployment_id`: your gpt-3.5-turbo or gpt-4 deployment ID
-    - `smart_llm_deployment_id`: your gpt-4 deployment ID
-    - `embedding_model_deployment_id`: your text-embedding-ada-002 v2 deployment ID
+  - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section.
 
 Example:
 
@@ -135,7 +132,7 @@ found in the [repository].
 # Please specify all of these values as double-quoted strings
 # Replace string in angled brackets (<>) to your own deployment Name
 azure_model_map:
-    fast_llm_deployment_id: ""
+    gpt-4-turbo-preview: ""
     ...
 ```
 
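For reference, an `azure.yaml` filled in against the new defaults would map the three new model names to Azure deployment IDs. A minimal sketch is shown below; the API version, endpoint, and deployment names are placeholders carried over from the template, not values introduced by this patch:

```yaml
# Sketch of a completed azure.yaml under the new default models.
# Replace the placeholder values with your own Azure OpenAI settings.
azure_api_type: azure
azure_api_version: api-version-for-azure          # e.g. the API version your resource supports
azure_endpoint: your-azure-openai-endpoint
azure_model_map:
    gpt-3.5-turbo-0125: gpt35-deployment-id-for-azure      # serves FAST_LLM
    gpt-4-turbo-preview: gpt4-deployment-id-for-azure      # serves SMART_LLM
    text-embedding-3-small: embedding-deployment-id-for-azure  # serves EMBEDDING_MODEL
```

Users who prefer the previous behaviour can keep the old models by setting them explicitly in `.env`, e.g. `SMART_LLM=gpt-4`, `FAST_LLM=gpt-3.5-turbo-16k`, and `EMBEDDING_MODEL=text-embedding-ada-002`.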