From a6432e6ce48ee16017c7e3f6fb2f8273c9a21cc6 Mon Sep 17 00:00:00 2001
From: Ding3LI
Date: Sat, 15 Apr 2023 16:26:42 -0500
Subject: [PATCH] [template] env template: added clarification and optional
 usages

---
 .env.template | 5 ++++-
 README.md     | 8 ++++----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/.env.template b/.env.template
index 6565dfdc5d..a030a9390f 100644
--- a/.env.template
+++ b/.env.template
@@ -50,7 +50,10 @@ SMART_TOKEN_LIMIT=8000
 ### MEMORY
 ################################################################################
 
-# MEMORY_BACKEND - Memory backend type (Default: local)
+### MEMORY_BACKEND - Memory backend type
+# local - Default
+# pinecone - Pinecone (if configured)
+# redis - Redis (if configured)
 MEMORY_BACKEND=local
 
 ### PINECONE
diff --git a/README.md b/README.md
index f2ad74e58d..16dcbc5f72 100644
--- a/README.md
+++ b/README.md
@@ -124,8 +124,8 @@ pip install -r requirements.txt
   - `smart_llm_model_deployment_id` - your gpt-4 deployment ID
   - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID
 - Please specify all of these values as double-quoted strings
-  > Replace string in angled brackets (<>) to your own ID
 ```yaml
+  # Replace the string in angle brackets (<>) with your own ID
 azure_model_map:
     fast_llm_model_deployment_id: "<my-fast-llm-deployment-id>"
     ...
@@ -323,9 +323,9 @@
 By default, Auto-GPT is going to use LocalCache instead of redis or Pinecone.
 
 To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want:
 
-`local` (default) uses a local JSON cache file
-`pinecone` uses the Pinecone.io account you configured in your ENV settings
-`redis` will use the redis cache that you configured
+* `local` (default) uses a local JSON cache file
+* `pinecone` uses the Pinecone.io account you configured in your ENV settings
+* `redis` uses the Redis cache that you configured
 
 ## View Memory Usage
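
Usage note (illustrative, not applied by the patch): a minimal sketch of what a resulting `.env` could look like after switching `MEMORY_BACKEND` to `redis`. Only `MEMORY_BACKEND` comes from this patch; the `REDIS_*` keys and their values are assumptions for illustration, so check the REDIS section of `.env.template` at this revision for the actual key names.

```shell
# Minimal sketch, assuming a local Redis instance on the default port.
# MEMORY_BACKEND is documented by this patch; the REDIS_* keys below are
# assumed for illustration and may differ from the template's real keys.
MEMORY_BACKEND=redis
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=

# Or, to use Pinecone instead (PINECONE_* keys likewise assumed):
# MEMORY_BACKEND=pinecone
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region
```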