From 33c8fe627a58429942e8219b5a79d264ee680ae0 Mon Sep 17 00:00:00 2001
From: pratiksinghchauhan
Date: Mon, 10 Apr 2023 11:39:52 +0530
Subject: [PATCH 1/3] improve performance and remove code duplication

---
 scripts/browse.py   | 19 ++++++++-----------
 scripts/commands.py | 13 +++++++------
 2 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/scripts/browse.py b/scripts/browse.py
index 0fda3d7b06..70f754e0b6 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -5,14 +5,17 @@ from llm_utils import create_chat_completion
 
 cfg = Config()
 
-def scrape_text(url):
+def get_website_content(url):
     response = requests.get(url, headers=cfg.user_agent_header)
 
-    # Check if the response contains an HTTP error
     if response.status_code >= 400:
         return "Error: HTTP " + str(response.status_code) + " error"
 
+    return response
-    soup = BeautifulSoup(response.text, "html.parser")
+
+
+def scrape_text(website_content):
+    soup = BeautifulSoup(website_content.text, "html.parser")
 
     for script in soup(["script", "style"]):
         script.extract()
@@ -39,14 +42,8 @@ def format_hyperlinks(hyperlinks):
     return formatted_links
 
 
-def scrape_links(url):
-    response = requests.get(url, headers=cfg.user_agent_header)
-
-    # Check if the response contains an HTTP error
-    if response.status_code >= 400:
-        return "error"
-
-    soup = BeautifulSoup(response.text, "html.parser")
+def scrape_links(website_content):
+    soup = BeautifulSoup(website_content.text, "html.parser")
 
     for script in soup(["script", "style"]):
         script.extract()
diff --git a/scripts/commands.py b/scripts/commands.py
index ba5383957a..4f94cebbbe 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -163,8 +163,9 @@ def google_official_search(query, num_results=8):
     return search_results_links
 
 def browse_website(url, question):
-    summary = get_text_summary(url, question)
-    links = get_hyperlinks(url)
+    website_content = browse.get_website_content(url)
+    summary = get_text_summary(website_content, question)
+    links = get_hyperlinks(website_content)
 
     # Limit links to 5
     if len(links) > 5:
@@ -175,14 +176,14 @@ def browse_website(url, question):
     return result
 
 
-def get_text_summary(url, question):
-    text = browse.scrape_text(url)
+def get_text_summary(website_content, question):
+    text = browse.scrape_text(website_content)
     summary = browse.summarize_text(text, question)
     return """ "Result" : """ + summary
 
 
-def get_hyperlinks(url):
-    link_list = browse.scrape_links(url)
+def get_hyperlinks(website_content):
+    link_list = browse.scrape_links(website_content)
     return link_list
 
 

From 13467259b4722b45cba098a225609f8e09bbef3f Mon Sep 17 00:00:00 2001
From: pratiksinghchauhan
Date: Mon, 10 Apr 2023 12:07:37 +0530
Subject: [PATCH 2/3] fix: #323 Error communicating with OpenAI

---
 .env.template     | 6 +++---
 README.md         | 2 +-
 scripts/config.py | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/.env.template b/.env.template
index 525cd61c5f..8dcdc37ccb 100644
--- a/.env.template
+++ b/.env.template
@@ -7,8 +7,8 @@ FAST_LLM_MODEL="gpt-3.5-turbo"
 GOOGLE_API_KEY=
 CUSTOM_SEARCH_ENGINE_ID=
 USE_AZURE=False
-OPENAI_API_BASE=your-base-url-for-azure
-OPENAI_API_VERSION=api-version-for-azure
-OPENAI_DEPLOYMENT_ID=deployment-id-for-azure
+OPENAI_AZURE_API_BASE=your-base-url-for-azure
+OPENAI_AZURE_API_VERSION=api-version-for-azure
+OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure
 IMAGE_PROVIDER=dalle
 HUGGINGFACE_API_TOKEN=
\ No newline at end of file
diff --git a/README.md b/README.md
index ba80818d0e..f581274c5c 100644
--- a/README.md
+++ b/README.md
@@ -93,7 +93,7 @@
    pip install -r requirements.txt
 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
   - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
   - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
-  - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_API_BASE`, `OPENAI_API_VERSION` and `OPENAI_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section
+  - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section
 
 ## 🔧 Usage
diff --git a/scripts/config.py b/scripts/config.py
index 4d7adec1c0..0e8ea3203b 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -44,9 +44,9 @@ class Config(metaclass=Singleton):
         self.use_azure = False
         self.use_azure = os.getenv("USE_AZURE") == 'True'
         if self.use_azure:
-            self.openai_api_base = os.getenv("OPENAI_API_BASE")
-            self.openai_api_version = os.getenv("OPENAI_API_VERSION")
-            self.openai_deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID")
+            self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
+            self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
+            self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
             openai.api_type = "azure"
             openai.api_base = self.openai_api_base
             openai.api_version = self.openai_api_version

From 156739788aa471304fc5c6eaf9254cd4619fa459 Mon Sep 17 00:00:00 2001
From: pratiksinghchauhan
Date: Mon, 10 Apr 2023 12:31:37 +0530
Subject: [PATCH 3/3] remove unnecessary changes

---
 scripts/browse.py   | 28 +++++++++++++++++++---------
 scripts/commands.py | 15 ++++++++-------
 2 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/scripts/browse.py b/scripts/browse.py
index 70f754e0b6..7eeaaf4d94 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -5,17 +5,21 @@ from llm_utils import create_chat_completion
 
 cfg = Config()
 
-def get_website_content(url):
-    response = requests.get(url, headers=cfg.user_agent_header)
+def scrape_text(url):
+    # Most basic check if the URL is valid:
+    if not url.startswith('http'):
+        return "Error: Invalid URL"
+
+    try:
+        response = requests.get(url, headers=cfg.user_agent_header)
+    except requests.exceptions.RequestException as e:
+        return "Error: " + str(e)
 
+    # Check if the response contains an HTTP error
     if response.status_code >= 400:
         return "Error: HTTP " + str(response.status_code) + " error"
 
-    return response
-
-
-def scrape_text(website_content):
-    soup = BeautifulSoup(website_content.text, "html.parser")
+    soup = BeautifulSoup(response.text, "html.parser")
 
     for script in soup(["script", "style"]):
         script.extract()
@@ -42,8 +46,14 @@ def format_hyperlinks(hyperlinks):
     return formatted_links
 
 
-def scrape_links(website_content):
-    soup = BeautifulSoup(website_content.text, "html.parser")
+def scrape_links(url):
+    response = requests.get(url, headers=cfg.user_agent_header)
+
+    # Check if the response contains an HTTP error
+    if response.status_code >= 400:
+        return "error"
+
+    soup = BeautifulSoup(response.text, "html.parser")
 
     for script in soup(["script", "style"]):
         script.extract()
diff --git a/scripts/commands.py b/scripts/commands.py
index 4f94cebbbe..76139b5c62 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -106,6 +106,8 @@ def execute_command(command_name, arguments):
         return execute_python_file(arguments["file"])
     elif command_name == "generate_image":
         return generate_image(arguments["prompt"])
+    elif command_name == "do_nothing":
+        return "No action performed."
     elif command_name == "task_complete":
         shutdown()
     else:
@@ -163,9 +165,8 @@ def google_official_search(query, num_results=8):
     return search_results_links
 
 def browse_website(url, question):
-    website_content = browse.get_website_content(url)
-    summary = get_text_summary(website_content, question)
-    links = get_hyperlinks(website_content)
+    summary = get_text_summary(url, question)
+    links = get_hyperlinks(url)
 
     # Limit links to 5
     if len(links) > 5:
@@ -176,14 +177,14 @@ def browse_website(url, question):
     return result
 
 
-def get_text_summary(website_content, question):
-    text = browse.scrape_text(website_content)
+def get_text_summary(url, question):
+    text = browse.scrape_text(url)
     summary = browse.summarize_text(text, question)
     return """ "Result" : """ + summary
 
 
-def get_hyperlinks(website_content):
-    link_list = browse.scrape_links(website_content)
+def get_hyperlinks(url):
+    link_list = browse.scrape_links(url)
     return link_list
 
 
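Note: the validation and error-handling flow that patch 3/3 settles on in scrape_text can be exercised in isolation. The sketch below is illustrative only and is not part of the series; the fetch_page name is hypothetical, and only the requests library (which the patched code already uses) is assumed.

import requests

def fetch_page(url, headers=None):
    # Mirrors the three checks the final scrape_text performs:
    # reject non-HTTP URLs, catch network-level failures, and
    # report HTTP error statuses as plain strings.
    if not url.startswith('http'):
        return "Error: Invalid URL"
    try:
        response = requests.get(url, headers=headers)
    except requests.exceptions.RequestException as e:
        return "Error: " + str(e)
    if response.status_code >= 400:
        return "Error: HTTP " + str(response.status_code) + " error"
    return response.text

print(fetch_page("not-a-url"))  # -> "Error: Invalid URL"

Returning error strings instead of raising keeps browse_website's call sites simple, at the cost of callers having to distinguish page text from "Error: ..." strings.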