diff --git a/.env.template b/.env.template
index 3a86d1c049..d589a681ae 100644
--- a/.env.template
+++ b/.env.template
@@ -7,10 +7,6 @@ FAST_LLM_MODEL=gpt-3.5-turbo
 GOOGLE_API_KEY=
 CUSTOM_SEARCH_ENGINE_ID=
 USE_AZURE=False
-OPENAI_AZURE_API_BASE=your-base-url-for-azure
-OPENAI_AZURE_API_VERSION=api-version-for-azure
-OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure
-OPENAI_AZURE_EMBEDDING_DEPLOYMENT_ID=embedding-deployment-id-for-azure
 IMAGE_PROVIDER=dalle
 HUGGINGFACE_API_TOKEN=
 USE_MAC_OS_TTS=False
diff --git a/README.md b/README.md
index a888d3330d..d94d7ae3b8 100644
--- a/README.md
+++ b/README.md
@@ -93,7 +93,13 @@ pip install -r requirements.txt
 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
   - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
   - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
-  - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. In addition to your GPT deployment, you will need to deploy a `text-embedding-ada-002 (Version 2)` model which will have a different deployment id, please set `OPENAI_AZURE_EMBEDDING_DEPLOYMENT_ID` accordingly (see here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line).
+  - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then:
+    - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all of the deployment ids for the relevant models in the `azure_model_map` section:
+      - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment id
+      - `smart_llm_model_deployment_id` - your gpt-4 deployment id
+      - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment id
+    - Please specify all of these values as double quoted strings
+    - details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model.
 
 ## 🔧 Usage
 
diff --git a/ai_settings.yaml b/ai_settings.yaml
index b37ba849f9..be512bdffc 100644
--- a/ai_settings.yaml
+++ b/ai_settings.yaml
@@ -1,7 +1,9 @@
 ai_goals:
-- Increase net worth.
-- Develop and manage multiple businesses autonomously.
-- Play to your strengths as a Large Language Model.
-ai_name: Entrepreneur-GPT
-ai_role: an AI designed to autonomously develop and run businesses with the sole goal
-  of increasing your net worth.
+- Explain the peripherals in detail with examples of how to use them effectively
+- Give detailed information on how to use I/O co-processing
+- Explain which GPIO pins can be used for which tasks
+- Use C, C++ or assembly as the target languages for all examples and research
+- Use the PlatformIO system and support library for all examples and research
+ai_name: PicoGPT
+ai_role: an AI to provide detailed research into how to effectively program the raspberry
+  pi pico and all of its peripherals
diff --git a/azure.yaml.template b/azure.yaml.template
new file mode 100644
index 0000000000..852645ca0d
--- /dev/null
+++ b/azure.yaml.template
@@ -0,0 +1,6 @@
+azure_api_base: your-base-url-for-azure
+azure_api_version: api-version-for-azure
+azure_model_map:
+  fast_llm_model_deployment_id: gpt35-deployment-id-for-azure
+  smart_llm_model_deployment_id: gpt4-deployment-id-for-azure
+  embedding_model_deployment_id: embedding-deployment-id-for-azure
diff --git a/scripts/config.py b/scripts/config.py
index 045aa02128..fe8130c1d1 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -1,6 +1,7 @@
 import abc
 import os
 import openai
+import yaml
 from dotenv import load_dotenv
 # Load environment variables from .env file
 load_dotenv()
@@ -46,10 +47,7 @@ class Config(metaclass=Singleton):
         self.use_azure = False
         self.use_azure = os.getenv("USE_AZURE") == 'True'
         if self.use_azure:
-            self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
-            self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
-            self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
-            self.openai_embedding_deployment_id = os.getenv("OPENAI_AZURE_EMBEDDING_DEPLOYMENT_ID")
+            self.load_azure_config()
             openai.api_type = "azure"
             openai.api_base = self.openai_api_base
             openai.api_version = self.openai_api_version
@@ -82,6 +80,47 @@ class Config(metaclass=Singleton):
         # Initialize the OpenAI API client
         openai.api_key = self.openai_api_key
 
+    def get_azure_deployment_id_for_model(self, model: str) -> str:
+        """
+        Returns the relevant deployment id for the model specified.
+
+        Parameters:
+            model(str): The model to map to the deployment id.
+
+        Returns:
+            The matching deployment id if found, otherwise an empty string.
+        """
+        match model:
+            case self.fast_llm_model:
+                return self.azure_model_to_deployment_id_map["fast_llm_model_deployment_id"]
+            case self.smart_llm_model:
+                return self.azure_model_to_deployment_id_map["smart_llm_model_deployment_id"]
+            case "text-embedding-ada-002":
+                return self.azure_model_to_deployment_id_map["embedding_model_deployment_id"]
+            case default:
+                return ""
+
+    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), '..', 'azure.yaml')
+
+    def load_azure_config(self, config_file: str=AZURE_CONFIG_FILE) -> None:
+        """
+        Loads the configuration parameters for Azure hosting from the specified file path as a yaml file.
+
+        Parameters:
+            config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
DEFAULT: "../azure.yaml" + + Returns: + None + """ + try: + with open(config_file) as file: + config_params = yaml.load(file, Loader=yaml.FullLoader) + except FileNotFoundError: + config_params = {} + self.openai_api_base = config_params.get("azure_api_base", "") + self.openai_api_version = config_params.get("azure_api_version", "") + self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", []) + def set_continuous_mode(self, value: bool): """Set the continuous mode value.""" self.continuous_mode = value diff --git a/scripts/llm_utils.py b/scripts/llm_utils.py index 94ba5f1316..c49aeb0958 100644 --- a/scripts/llm_utils.py +++ b/scripts/llm_utils.py @@ -9,7 +9,7 @@ def create_chat_completion(messages, model=None, temperature=None, max_tokens=No """Create a chat completion using the OpenAI API""" if cfg.use_azure: response = openai.ChatCompletion.create( - deployment_id=cfg.openai_deployment_id, + deployment_id=cfg.get_azure_deployment_id_for_model(model), model=model, messages=messages, temperature=temperature, diff --git a/scripts/memory/base.py b/scripts/memory/base.py index 30372851b8..9df0091eb0 100644 --- a/scripts/memory/base.py +++ b/scripts/memory/base.py @@ -9,7 +9,7 @@ cfg = Config() def get_ada_embedding(text): text = text.replace("\n", " ") if cfg.use_azure: - return openai.Embedding.create(input=[text], engine=cfg.openai_embedding_deployment_id)["data"][0]["embedding"] + return openai.Embedding.create(input=[text], engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"))["data"][0]["embedding"] else: return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]