mirror of https://github.com/OS-Copilot/OS-Copilot.git

Renamed LLAMA to OLLAMA to make it more generic, and added the option for local embeddings via Ollama.
@@ -14,7 +14,9 @@ WOLFRAMALPHA_APP_ID=""
 # ollama run llama3
 # export NO_PROXY=localhost,127.0.0.1
 # MODEL_NAME="llama3"
-# MODEL_TYPE="LLAMA" # if use the gpt series (GPT), lamma series (LLAMA)
+# MODEL_TYPE="OLLAMA"
+# EMBED_MODEL_TYPE="OLLAMA"
+# EMBED_MODEL_NAME="nomic-embed-text"
 # MODEL_SERVER="http://localhost:11434" # only for local model
 # test script
 # python test_llama3.py

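With the template above pointing at a local server, it is worth confirming that Ollama is reachable and the embedding model has been pulled before running OS-Copilot. A minimal sketch, assuming Ollama's standard REST endpoint and that `ollama pull nomic-embed-text` has already been run:

```python
# Sanity check against Ollama's /api/embeddings endpoint: a successful
# response returns {"embedding": [...]} for the requested model.
import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:11434/api/embeddings",
    data=json.dumps({"model": "nomic-embed-text", "prompt": "hello"}).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    embedding = json.load(resp)["embedding"]
print(f"got a {len(embedding)}-dimensional embedding")
```
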
@@ -35,8 +35,10 @@ python examples/LLAMA3/test_llama3.py
 
 ```
 MODEL_NAME="llama3"
-MODEL_TYPE="LLAMA"
+MODEL_TYPE="OLLAMA"
 MODEL_SERVER="http://localhost:11434"
+EMBED_MODEL_TYPE="OLLAMA"
+EMBED_MODEL_NAME="nomic-embed-text"
 ```
 
 If the api cannot be linked in the script, run the following command to resolve the problem

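The command itself sits outside this hunk, but judging by the `.env` template earlier in this commit it is presumably `export NO_PROXY=localhost,127.0.0.1`, which keeps requests to the local Ollama server from being routed through an HTTP proxy.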
@@ -1,7 +1,7 @@
 import re
 import json
 import os
-from oscopilot.utils.llms import OpenAI,LLAMA
+from oscopilot.utils.llms import OpenAI, OLLAMA
 # from oscopilot.environments.py_env import PythonEnv
 # from oscopilot.environments.py_jupyter_env import PythonJupyterEnv
 from oscopilot.environments import Env

@@ -18,8 +18,8 @@ class BaseModule:
         """
         if MODEL_TYPE == "OpenAI":
             self.llm = OpenAI()
-        elif MODEL_TYPE == "LLAMA":
-            self.llm = LLAMA()
+        elif MODEL_TYPE == "OLLAMA":
+            self.llm = OLLAMA()
         # self.environment = PythonEnv()
         # self.environment = PythonJupyterEnv()
         self.environment = Env()

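Combined with the `.env` changes, this dispatch means the backend is selected by a single environment switch. A rough sketch of the resulting flow (the import path comes from the diff; that `OLLAMA()` picks up `MODEL_NAME` and `MODEL_SERVER` from the environment, as the old `LLAMA` class did, is an assumption here):

```python
# Select the local backend before oscopilot reads its configuration.
# MODEL_TYPE / MODEL_NAME / MODEL_SERVER mirror the .env entries above;
# note that load_dotenv(..., override=True) in oscopilot may re-read .env
# and override these values.
import os

os.environ["MODEL_TYPE"] = "OLLAMA"
os.environ["MODEL_NAME"] = "llama3"
os.environ["MODEL_SERVER"] = "http://localhost:11434"

from oscopilot.utils.llms import OLLAMA

llm = OLLAMA()  # the constructor BaseModule now calls for MODEL_TYPE == "OLLAMA"
print(llm.chat([{"role": "user", "content": "hello"}]))
```
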
@@ -4,6 +4,7 @@
 
 from langchain.vectorstores import Chroma
 from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain_community.embeddings import OllamaEmbeddings
 import argparse
 import json
 import sys

@@ -14,6 +15,8 @@ load_dotenv(dotenv_path='.env', override=True)
 OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
 OPENAI_ORGANIZATION = os.getenv('OPENAI_ORGANIZATION')
 
+EMBED_MODEL_TYPE = os.getenv('EMBED_MODEL_TYPE')
+EMBED_MODEL_NAME = os.getenv('EMBED_MODEL_NAME')
 
 class ToolManager:
     """

@@ -60,12 +63,18 @@ class ToolManager:
         os.makedirs(f"{generated_tool_repo_dir}/tool_code", exist_ok=True)
         os.makedirs(f"{generated_tool_repo_dir}/tool_description", exist_ok=True)
         # Utilize the Chroma database and employ OpenAI Embeddings for vectorization (default: text-embedding-ada-002)
-        self.vectordb = Chroma(
-            collection_name="tool_vectordb",
-            embedding_function=OpenAIEmbeddings(
+        if EMBED_MODEL_TYPE == "OpenAI":
+            embedding_function = OpenAIEmbeddings(
                 openai_api_key=OPENAI_API_KEY,
                 openai_organization=OPENAI_ORGANIZATION,
-            ),
-        )
+            )
+        elif EMBED_MODEL_TYPE == "OLLAMA":
+            embedding_function = OllamaEmbeddings(model=EMBED_MODEL_NAME)
+
+        self.vectordb = Chroma(
+            collection_name="tool_vectordb",
+            embedding_function=embedding_function,
+            persist_directory=self.vectordb_path,
+        )
         assert self.vectordb._collection.count() == len(self.generated_tools), (

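The new `OllamaEmbeddings` object can also be exercised on its own to check what the vector store will receive. A short sketch, assuming a running Ollama server and the `langchain_community` package from the new import:

```python
# Embed a sample tool description the same way ToolManager's Chroma
# collection would when EMBED_MODEL_TYPE == "OLLAMA".
from langchain_community.embeddings import OllamaEmbeddings

embedding_function = OllamaEmbeddings(model="nomic-embed-text")
vector = embedding_function.embed_query("take a screenshot of the active window")
print(len(vector))  # nomic-embed-text produces 768-dimensional vectors
```

One caveat: switching `EMBED_MODEL_TYPE` changes the embedding space, so a `tool_vectordb` collection persisted with OpenAI embeddings cannot be meaningfully queried with Ollama embeddings; the count assertion above will not catch that mismatch.
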
@@ -67,7 +67,7 @@ class OpenAI:
         return response.choices[0].message.content
 
 
-class LLAMA:
+class OLLAMA:
     """
     A class for interacting with the OpenAI API, allowing for chat completion requests.

@@ -135,7 +135,7 @@ def main():
     # message.append({"role": "user", "content": 'hello'})
     # print(OPENAI_API_KEY)
     # print(BASE_URL)
-    llm = LLAMA()
+    llm = OLLAMA()
     response = llm.chat(messages)
     print(response)
     end_time = time.time()

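For reference, the request shape a local chat call uses. This is a standalone sketch against Ollama's standard `/api/chat` endpoint, not necessarily the exact code inside the renamed `OLLAMA` class:

```python
# Non-streaming chat completion against a local Ollama server; with
# "stream": False the reply arrives as a single JSON object whose
# message.content field holds the assistant's text.
import json
import urllib.request

payload = {
    "model": "llama3",
    "messages": [{"role": "user", "content": "hello"}],
    "stream": False,
}
req = urllib.request.Request(
    "http://localhost:11434/api/chat",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["message"]["content"])
```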