From 435cbfed2100459de4c8296c739bf70d919604ae Mon Sep 17 00:00:00 2001
From: Alex O'Connell
Date: Sun, 21 Apr 2024 23:47:55 -0400
Subject: [PATCH] add llama 3 to list of supported models

---
 custom_components/llama_conversation/translations/en.json | 2 +-
 docs/Model Prompting.md                                   | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/custom_components/llama_conversation/translations/en.json b/custom_components/llama_conversation/translations/en.json
index ef1c78d..e58074a 100644
--- a/custom_components/llama_conversation/translations/en.json
+++ b/custom_components/llama_conversation/translations/en.json
@@ -20,7 +20,7 @@
                     "downloaded_model_quantization": "Downloaded model quantization",
                     "huggingface_model": "HuggingFace Model"
                 },
-                "description": "Please select a model to use.\n\n**Models supported out of the box:**\n1. [Home LLM](https://huggingface.co/collections/acon96/home-llm-6618762669211da33bb22c5a): Home 3B & Home 1B\n2. Mistral: [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) or [Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)",
+                "description": "Please select a model to use.\n\n**Models supported out of the box:**\n1. [Home LLM](https://huggingface.co/collections/acon96/home-llm-6618762669211da33bb22c5a): Home 3B & Home 1B\n2. Mistral: [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) or [Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)\n3. Llama 3: [8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) and [70B](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct)",
                 "title": "Select Model"
             },
             "remote_model": {
diff --git a/docs/Model Prompting.md b/docs/Model Prompting.md
index ee1d500..b084cef 100644
--- a/docs/Model Prompting.md
+++ b/docs/Model Prompting.md
@@ -59,7 +59,9 @@ Currently supported prompt formats are:
 2. Vicuna
 3. Alpaca
 4. Mistral
-5. None (useful for foundation models)
+5. Zephyr
+6. Llama 3
+7. None (useful for foundation models)
 
 ## Prompting other models with In Context Learning
 It is possible to use models that are not fine-tuned with the dataset via the usage of In Context Learning (ICL) examples. These examples condition the model to output the correct JSON schema without any fine-tuning of the model.
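
A note on the "Llama 3" prompt format this patch registers: the Llama 3 Instruct models expect Meta's header-token chat template rather than ChatML-, Vicuna-, or Mistral-style markers. The sketch below illustrates how such a prompt is typically assembled; it is a minimal example under that assumption, and the `build_llama3_prompt` helper and its message shape are hypothetical, not code from `custom_components/llama_conversation`.

```python
# Minimal sketch of the Llama 3 Instruct chat template (Meta's documented
# special tokens). The helper name and message structure are illustrative
# assumptions, not this integration's actual API.

def build_llama3_prompt(system_prompt: str, messages: list[dict[str, str]]) -> str:
    """Assemble a single prompt string in the Llama 3 Instruct format."""
    prompt = "<|begin_of_text|>"
    prompt += f"<|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|>"
    for message in messages:  # each dict: {"role": "user" or "assistant", "content": "..."}
        prompt += (
            f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n"
            f"{message['content']}<|eot_id|>"
        )
    # Open an assistant header so the model generates the next reply turn.
    prompt += "<|start_header_id|>assistant<|end_header_id|>\n\n"
    return prompt


if __name__ == "__main__":
    print(build_llama3_prompt(
        "You are a helpful home assistant.",
        [{"role": "user", "content": "Turn on the kitchen light."}],
    ))
```

Generation should then stop on `<|eot_id|>` (or `<|end_of_text|>`); a mismatched template or stop token is the usual cause of run-on output when a new prompt format like this one is added.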