diff --git a/scripts/browse.py b/scripts/browse.py
index 0fda3d7b06..70f754e0b6 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -5,14 +5,17 @@ from llm_utils import create_chat_completion
 
 cfg = Config()
 
-def scrape_text(url):
+def get_website_content(url):
     response = requests.get(url, headers=cfg.user_agent_header)
 
-    # Check if the response contains an HTTP error
     if response.status_code >= 400:
         return "Error: HTTP " + str(response.status_code) + " error"
+    return response
 
-    soup = BeautifulSoup(response.text, "html.parser")
+
+
+def scrape_text(website_content):
+    soup = BeautifulSoup(website_content.text, "html.parser")
 
     for script in soup(["script", "style"]):
         script.extract()
@@ -39,14 +42,8 @@ def format_hyperlinks(hyperlinks):
     return formatted_links
 
 
-def scrape_links(url):
-    response = requests.get(url, headers=cfg.user_agent_header)
-
-    # Check if the response contains an HTTP error
-    if response.status_code >= 400:
-        return "error"
-
-    soup = BeautifulSoup(response.text, "html.parser")
+def scrape_links(website_content):
+    soup = BeautifulSoup(website_content.text, "html.parser")
 
     for script in soup(["script", "style"]):
         script.extract()
diff --git a/scripts/commands.py b/scripts/commands.py
index ba5383957a..4f94cebbbe 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -163,8 +163,9 @@ def google_official_search(query, num_results=8):
     return search_results_links
 
 def browse_website(url, question):
-    summary = get_text_summary(url, question)
-    links = get_hyperlinks(url)
+    website_content = browse.get_website_content(url)
+    summary = get_text_summary(website_content, question)
+    links = get_hyperlinks(website_content)
 
     # Limit links to 5
     if len(links) > 5:
@@ -175,14 +176,14 @@ def browse_website(url, question):
     return result
 
 
-def get_text_summary(url, question):
-    text = browse.scrape_text(url)
+def get_text_summary(website_content, question):
+    text = browse.scrape_text(website_content)
     summary = browse.summarize_text(text, question)
     return """ "Result" : """ + summary
 
 
-def get_hyperlinks(url):
-    link_list = browse.scrape_links(url)
+def get_hyperlinks(website_content):
+    link_list = browse.scrape_links(website_content)
     return link_list
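
One caveat in this refactor: `get_website_content` returns a plain string (`"Error: HTTP ..."`) on HTTP failures and a `requests.Response` on success, while `scrape_text` and `scrape_links` immediately access `.text` on their argument, so passing the error string through `browse_website` would raise an `AttributeError` downstream. A minimal sketch of a type guard in `browse_website` that would close that gap — this guard is hypothetical and not part of the diff above:

```python
import browse

def browse_website(url, question):
    website_content = browse.get_website_content(url)

    # Hypothetical guard, not in the diff: get_website_content() returns a
    # str on HTTP errors and a requests.Response on success, so bail out
    # before the scrapers try to read .text from a plain string.
    if isinstance(website_content, str):
        return website_content  # propagate the "Error: HTTP ..." message

    summary = get_text_summary(website_content, question)
    links = get_hyperlinks(website_content)
    # ... rest of browse_website unchanged
```

An alternative would be for `get_website_content` to raise an exception on HTTP errors instead of returning a sentinel string, which would keep its return type uniform and spare callers the `isinstance` check.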