Merge branch 'master' of https://github.com/Significant-Gravitas/Auto-GPT into pr/87

tests/__init__.py: 0 lines (Normal file)

tests/context.py: 6 lines (Normal file)
@@ -0,0 +1,6 @@
import os
import sys

sys.path.insert(
    0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts"))
)
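
Editor's note: tests/context.py is imported purely for its side effect of putting the legacy scripts/ directory on sys.path. A test module that still needs modules from scripts/ would, under that assumption, start with a line like the one below; both the updated json test and tests/test_token_counter.py in this diff use this pattern.

import tests.context  # side-effect import: extends sys.path with ../scripts before other imports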

tests/integration/memory_tests.py: 49 lines (Normal file)
@@ -0,0 +1,49 @@
import random
import string
import sys
import unittest
from pathlib import Path

from autogpt.config import Config
from autogpt.memory.local import LocalCache


class TestLocalCache(unittest.TestCase):
    def random_string(self, length):
        return "".join(random.choice(string.ascii_letters) for _ in range(length))

    def setUp(self):
        cfg = Config()
        self.cache = LocalCache(cfg)
        self.cache.clear()

        # Add example texts to the cache
        self.example_texts = [
            "The quick brown fox jumps over the lazy dog",
            "I love machine learning and natural language processing",
            "The cake is a lie, but the pie is always true",
            "ChatGPT is an advanced AI model for conversation",
        ]

        for text in self.example_texts:
            self.cache.add(text)

        # Add some random strings to test noise
        for _ in range(5):
            self.cache.add(self.random_string(10))

    def test_get_relevant(self):
        query = "I'm interested in artificial intelligence and NLP"
        k = 3
        relevant_texts = self.cache.get_relevant(query, k)

        print(f"Top {k} relevant texts for the query '{query}':")
        for i, text in enumerate(relevant_texts, start=1):
            print(f"{i}. {text}")

        self.assertEqual(len(relevant_texts), k)
        self.assertIn(self.example_texts[1], relevant_texts)


if __name__ == "__main__":
    unittest.main()

tests/integration/milvus_memory_tests.py: 48 lines (Normal file)
@@ -0,0 +1,48 @@
import random
import string
import unittest

from autogpt.config import Config
from autogpt.memory.milvus import MilvusMemory


class TestMilvusMemory(unittest.TestCase):
    def random_string(self, length):
        return "".join(random.choice(string.ascii_letters) for _ in range(length))

    def setUp(self):
        cfg = Config()
        cfg.milvus_addr = "localhost:19530"
        self.memory = MilvusMemory(cfg)
        self.memory.clear()

        # Add example texts to the cache
        self.example_texts = [
            "The quick brown fox jumps over the lazy dog",
            "I love machine learning and natural language processing",
            "The cake is a lie, but the pie is always true",
            "ChatGPT is an advanced AI model for conversation",
        ]

        for text in self.example_texts:
            self.memory.add(text)

        # Add some random strings to test noise
        for _ in range(5):
            self.memory.add(self.random_string(10))

    def test_get_relevant(self):
        query = "I'm interested in artificial intelligence and NLP"
        k = 3
        relevant_texts = self.memory.get_relevant(query, k)

        print(f"Top {k} relevant texts for the query '{query}':")
        for i, text in enumerate(relevant_texts, start=1):
            print(f"{i}. {text}")

        self.assertEqual(len(relevant_texts), k)
        self.assertIn(self.example_texts[1], relevant_texts)


if __name__ == "__main__":
    unittest.main()

tests/local_cache_test.py: 56 lines (Normal file)
@@ -0,0 +1,56 @@
import os
import sys
import unittest

from autogpt.memory.local import LocalCache


def MockConfig():
    return type(
        "MockConfig",
        (object,),
        {
            "debug_mode": False,
            "continuous_mode": False,
            "speak_mode": False,
            "memory_index": "auto-gpt",
        },
    )


class TestLocalCache(unittest.TestCase):
    def setUp(self):
        self.cfg = MockConfig()
        self.cache = LocalCache(self.cfg)

    def test_add(self):
        text = "Sample text"
        self.cache.add(text)
        self.assertIn(text, self.cache.data.texts)

    def test_clear(self):
        self.cache.clear()
        self.assertEqual(self.cache.data, [""])

    def test_get(self):
        text = "Sample text"
        self.cache.add(text)
        result = self.cache.get(text)
        self.assertEqual(result, [text])

    def test_get_relevant(self):
        text1 = "Sample text 1"
        text2 = "Sample text 2"
        self.cache.add(text1)
        self.cache.add(text2)
        result = self.cache.get_relevant(text1, 1)
        self.assertEqual(result, [text1])

    def test_get_stats(self):
        text = "Sample text"
        self.cache.add(text)
        stats = self.cache.get_stats()
        self.assertEqual(stats, (1, self.cache.data.embeddings.shape))


if __name__ == "__main__":
    unittest.main()
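
Editor's note: the MockConfig() helper above builds a throwaway config with the three-argument form of type(). For readers unfamiliar with dynamic class creation, a rough, hypothetical equivalent is the plain class below (the name is illustrative only).

class MockConfigEquivalent:  # hypothetical name, shown only for comparison
    debug_mode = False
    continuous_mode = False
    speak_mode = False
    memory_index = "auto-gpt"

Because MockConfig() returns the class object itself rather than an instance, lookups such as self.cfg.memory_index in the test resolve directly against these class attributes.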

tests/milvus_memory_test.py: 63 lines (Normal file)
@@ -0,0 +1,63 @@
import os
import sys
import unittest

from autogpt.memory.milvus import MilvusMemory


def MockConfig():
    return type(
        "MockConfig",
        (object,),
        {
            "debug_mode": False,
            "continuous_mode": False,
            "speak_mode": False,
            "milvus_collection": "autogpt",
            "milvus_addr": "localhost:19530",
        },
    )


class TestMilvusMemory(unittest.TestCase):
    def setUp(self):
        self.cfg = MockConfig()
        self.memory = MilvusMemory(self.cfg)

    def test_add(self):
        text = "Sample text"
        self.memory.clear()
        self.memory.add(text)
        result = self.memory.get(text)
        self.assertEqual([text], result)

    def test_clear(self):
        self.memory.clear()
        self.assertEqual(self.memory.collection.num_entities, 0)

    def test_get(self):
        text = "Sample text"
        self.memory.clear()
        self.memory.add(text)
        result = self.memory.get(text)
        self.assertEqual(result, [text])

    def test_get_relevant(self):
        text1 = "Sample text 1"
        text2 = "Sample text 2"
        self.memory.clear()
        self.memory.add(text1)
        self.memory.add(text2)
        result = self.memory.get_relevant(text1, 1)
        self.assertEqual(result, [text1])

    def test_get_stats(self):
        text = "Sample text"
        self.memory.clear()
        self.memory.add(text)
        stats = self.memory.get_stats()
        self.assertEqual(15, len(stats))


if __name__ == "__main__":
    unittest.main()

tests/smoke_test.py: 63 lines (Normal file)
@@ -0,0 +1,63 @@
import os
import subprocess
import sys
import unittest

from autogpt.file_operations import delete_file, read_file

env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}


class TestCommands(unittest.TestCase):
    def test_write_file(self):
        # Test case to check if the write_file command can successfully write 'Hello World' to a file
        # named 'hello_world.txt'.

        # Read the current ai_settings.yaml file and store its content.
        ai_settings = None
        if os.path.exists("ai_settings.yaml"):
            with open("ai_settings.yaml", "r") as f:
                ai_settings = f.read()
            os.remove("ai_settings.yaml")

        try:
            if os.path.exists("hello_world.txt"):
                # Clean up any existing 'hello_world.txt' file before testing.
                delete_file("hello_world.txt")
            # Prepare input data for the test.
            input_data = """write_file-GPT
an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task.
Use the write_file command to write 'Hello World' into a file named "hello_world.txt".
Use the task_complete command to complete the task.
Do not use any other commands.

y -5
EOF"""
            command = f"{sys.executable} -m autogpt"

            # Execute the script with the input data.
            process = subprocess.Popen(
                command,
                stdin=subprocess.PIPE,
                shell=True,
                env={**os.environ, **env_vars},
            )
            process.communicate(input_data.encode())

            # Read the content of the 'hello_world.txt' file created during the test.
            content = read_file("hello_world.txt")
        finally:
            if ai_settings:
                # Restore the original ai_settings.yaml file.
                with open("ai_settings.yaml", "w") as f:
                    f.write(ai_settings)

        # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
        self.assertEqual(
            content, "Hello World", f"Expected 'Hello World', got {content}"
        )


# Run the test case.
if __name__ == "__main__":
    unittest.main()
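
Editor's note, not part of this commit: the suites in this diff are plain unittest test cases, so one assumed way to run them locally, with the project root as the working directory and any required backends (for example a Milvus server on localhost:19530) available, is a short discovery script like this:

import unittest

# Collect every *test*.py module under tests/ and run it with verbose output.
suite = unittest.defaultTestLoader.discover("tests", pattern="*test*.py")
unittest.TextTestRunner(verbosity=2).run(suite)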

tests/test_config.py: 84 lines (Normal file)
@@ -0,0 +1,84 @@
from unittest import TestCase

from autogpt.config import Config


class TestConfig(TestCase):
    """
    Test cases for the Config class, which handles the configuration settings
    for the AI and ensures it behaves as a singleton.
    """

    def setUp(self):
        """
        Set up the test environment by creating an instance of the Config class.
        """
        self.config = Config()

    def test_singleton(self):
        """
        Test if the Config class behaves as a singleton by ensuring that two instances are the same.
        """
        config2 = Config()
        self.assertIs(self.config, config2)

    def test_initial_values(self):
        """
        Test if the initial values of the Config class attributes are set correctly.
        """
        self.assertFalse(self.config.debug_mode)
        self.assertFalse(self.config.continuous_mode)
        self.assertFalse(self.config.speak_mode)
        self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo")
        self.assertEqual(self.config.smart_llm_model, "gpt-4")
        self.assertEqual(self.config.fast_token_limit, 4000)
        self.assertEqual(self.config.smart_token_limit, 8000)

    def test_set_continuous_mode(self):
        """
        Test if the set_continuous_mode() method updates the continuous_mode attribute.
        """
        self.config.set_continuous_mode(True)
        self.assertTrue(self.config.continuous_mode)

    def test_set_speak_mode(self):
        """
        Test if the set_speak_mode() method updates the speak_mode attribute.
        """
        self.config.set_speak_mode(True)
        self.assertTrue(self.config.speak_mode)

    def test_set_fast_llm_model(self):
        """
        Test if the set_fast_llm_model() method updates the fast_llm_model attribute.
        """
        self.config.set_fast_llm_model("gpt-3.5-turbo-test")
        self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test")

    def test_set_smart_llm_model(self):
        """
        Test if the set_smart_llm_model() method updates the smart_llm_model attribute.
        """
        self.config.set_smart_llm_model("gpt-4-test")
        self.assertEqual(self.config.smart_llm_model, "gpt-4-test")

    def test_set_fast_token_limit(self):
        """
        Test if the set_fast_token_limit() method updates the fast_token_limit attribute.
        """
        self.config.set_fast_token_limit(5000)
        self.assertEqual(self.config.fast_token_limit, 5000)

    def test_set_smart_token_limit(self):
        """
        Test if the set_smart_token_limit() method updates the smart_token_limit attribute.
        """
        self.config.set_smart_token_limit(9000)
        self.assertEqual(self.config.smart_token_limit, 9000)

    def test_set_debug_mode(self):
        """
        Test if the set_debug_mode() method updates the debug_mode attribute.
        """
        self.config.set_debug_mode(True)
        self.assertTrue(self.config.debug_mode)
@@ -1,9 +1,8 @@
import unittest
import os
import sys
# Probably a better way:
sys.path.append(os.path.abspath('../scripts'))
from json_parser import fix_and_parse_json

import tests.context
from autogpt.json_fixes.parsing import fix_and_parse_json


class TestParseJson(unittest.TestCase):
def test_valid_json(self):
@@ -11,16 +10,18 @@ class TestParseJson(unittest.TestCase):
json_str = '{"name": "John", "age": 30, "city": "New York"}'
obj = fix_and_parse_json(json_str)
self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})


def test_invalid_json_minor(self):
# Test that an invalid JSON string can be fixed with gpt
json_str = '{"name": "John", "age": 30, "city": "New York",}'
self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), {"name": "John", "age": 30, "city": "New York"})

with self.assertRaises(Exception):
fix_and_parse_json(json_str, try_to_fix_with_gpt=False)

def test_invalid_json_major_with_gpt(self):
# Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False
json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=True), {"name": "John", "age": 30, "city": "New York"})
with self.assertRaises(Exception):
fix_and_parse_json(json_str, try_to_fix_with_gpt=False)

def test_invalid_json_major_without_gpt(self):
# Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
@@ -50,25 +51,22 @@
}
}"""
good_obj = {
"command": {
"name": "browse_website",
"args":{
"url": "https://github.com/Torantulino/Auto-GPT"
}
},
"thoughts":
{
"text": "I suggest we start browsing the repository to find any issues that we can fix.",
"reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
"plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
"criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
"speak": "I will start browsing the repository to find any issues we can fix."
}
}
"command": {
"name": "browse_website",
"args": {"url": "https://github.com/Torantulino/Auto-GPT"},
},
"thoughts": {
"text": "I suggest we start browsing the repository to find any issues that we can fix.",
"reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
"plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
"criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
"speak": "I will start browsing the repository to find any issues we can fix.",
},
}
# Assert that this raises an exception:
self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)


self.assertEqual(
fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
)

def test_invalid_json_leading_sentence_with_gpt(self):
# Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
@@ -91,25 +89,23 @@
}
}"""
good_obj = {
"command": {
"name": "browse_website",
"args":{
"url": "https://github.com/Torantulino/Auto-GPT"
"command": {
"name": "browse_website",
"args": {"url": "https://github.com/Torantulino/Auto-GPT"},
},
"thoughts": {
"text": "Browsing the repository to identify potential bugs",
"reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
"plan": "- Analyze the repository for potential bugs and areas of improvement",
"criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
"speak": "I am browsing the repository to identify potential bugs.",
},
}
},
"thoughts":
{
"text": "Browsing the repository to identify potential bugs",
"reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
"plan": "- Analyze the repository for potential bugs and areas of improvement",
"criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
"speak": "I am browsing the repository to identify potential bugs."
}
}
# Assert that this raises an exception:
self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
self.assertEqual(
fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
)


if __name__ == '__main__':
unittest.main()
if __name__ == "__main__":
unittest.main()

tests/test_prompt_generator.py: 114 lines (Normal file)
@@ -0,0 +1,114 @@
from unittest import TestCase

from autogpt.promptgenerator import PromptGenerator


class TestPromptGenerator(TestCase):
    """
    Test cases for the PromptGenerator class, which is responsible for generating
    prompts for the AI with constraints, commands, resources, and performance evaluations.
    """

    @classmethod
    def setUpClass(cls):
        """
        Set up the initial state for the test class by creating a shared instance of PromptGenerator.
        """
        cls.generator = PromptGenerator()

    # Test whether the add_constraint() method adds a constraint to the generator's constraints list
    def test_add_constraint(self):
        """
        Test if the add_constraint() method adds a constraint to the generator's constraints list.
        """
        constraint = "Constraint1"
        self.generator.add_constraint(constraint)
        self.assertIn(constraint, self.generator.constraints)

    # Test whether the add_command() method adds a command to the generator's commands list
    def test_add_command(self):
        """
        Test if the add_command() method adds a command to the generator's commands list.
        """
        command_label = "Command Label"
        command_name = "command_name"
        args = {"arg1": "value1", "arg2": "value2"}
        self.generator.add_command(command_label, command_name, args)
        command = {
            "label": command_label,
            "name": command_name,
            "args": args,
        }
        self.assertIn(command, self.generator.commands)

    def test_add_resource(self):
        """
        Test if the add_resource() method adds a resource to the generator's resources list.
        """
        resource = "Resource1"
        self.generator.add_resource(resource)
        self.assertIn(resource, self.generator.resources)

    def test_add_performance_evaluation(self):
        """
        Test if the add_performance_evaluation() method adds an evaluation to the generator's
        performance_evaluation list.
        """
        evaluation = "Evaluation1"
        self.generator.add_performance_evaluation(evaluation)
        self.assertIn(evaluation, self.generator.performance_evaluation)

    def test_generate_prompt_string(self):
        """
        Test if the generate_prompt_string() method generates a prompt string with all the added
        constraints, commands, resources, and evaluations.
        """
        # Define the test data
        constraints = ["Constraint1", "Constraint2"]
        commands = [
            {
                "label": "Command1",
                "name": "command_name1",
                "args": {"arg1": "value1"},
            },
            {
                "label": "Command2",
                "name": "command_name2",
                "args": {},
            },
        ]
        resources = ["Resource1", "Resource2"]
        evaluations = ["Evaluation1", "Evaluation2"]

        # Add test data to the generator
        for constraint in constraints:
            self.generator.add_constraint(constraint)
        for command in commands:
            self.generator.add_command(
                command["label"], command["name"], command["args"]
            )
        for resource in resources:
            self.generator.add_resource(resource)
        for evaluation in evaluations:
            self.generator.add_performance_evaluation(evaluation)

        # Generate the prompt string and verify its correctness
        prompt_string = self.generator.generate_prompt_string()
        self.assertIsNotNone(prompt_string)

        # Check if all constraints, commands, resources, and evaluations are present in the prompt string
        for constraint in constraints:
            self.assertIn(constraint, prompt_string)
        for command in commands:
            self.assertIn(command["name"], prompt_string)
            for key, value in command["args"].items():
                self.assertIn(f'"{key}": "{value}"', prompt_string)
        for resource in resources:
            self.assertIn(resource, prompt_string)
        for evaluation in evaluations:
            self.assertIn(evaluation, prompt_string)

        self.assertIn("constraints", prompt_string.lower())
        self.assertIn("commands", prompt_string.lower())
        self.assertIn("resources", prompt_string.lower())
        self.assertIn("performance evaluation", prompt_string.lower())

tests/test_token_counter.py: 62 lines (Normal file)
@@ -0,0 +1,62 @@
import unittest
import tests.context
from autogpt.token_counter import count_message_tokens, count_string_tokens


class TestTokenCounter(unittest.TestCase):
    def test_count_message_tokens(self):
        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},
        ]
        self.assertEqual(count_message_tokens(messages), 17)

    def test_count_message_tokens_with_name(self):
        messages = [
            {"role": "user", "content": "Hello", "name": "John"},
            {"role": "assistant", "content": "Hi there!"},
        ]
        self.assertEqual(count_message_tokens(messages), 17)

    def test_count_message_tokens_empty_input(self):
        self.assertEqual(count_message_tokens([]), 3)

    def test_count_message_tokens_invalid_model(self):
        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},
        ]
        with self.assertRaises(KeyError):
            count_message_tokens(messages, model="invalid_model")

    def test_count_message_tokens_gpt_4(self):
        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},
        ]
        self.assertEqual(count_message_tokens(messages, model="gpt-4-0314"), 15)

    def test_count_string_tokens(self):
        string = "Hello, world!"
        self.assertEqual(
            count_string_tokens(string, model_name="gpt-3.5-turbo-0301"), 4
        )

    def test_count_string_tokens_empty_input(self):
        self.assertEqual(count_string_tokens("", model_name="gpt-3.5-turbo-0301"), 0)

    # NOTE: this redefines test_count_message_tokens_invalid_model above, so only
    # this later definition is collected and run by unittest.
    def test_count_message_tokens_invalid_model(self):
        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},
        ]
        with self.assertRaises(NotImplementedError):
            count_message_tokens(messages, model="invalid_model")

    def test_count_string_tokens_gpt_4(self):
        string = "Hello, world!"
        self.assertEqual(count_string_tokens(string, model_name="gpt-4-0314"), 4)


if __name__ == "__main__":
    unittest.main()

tests/unit/json_tests.py: 114 lines (Normal file)
@@ -0,0 +1,114 @@
import unittest

from autogpt.json_parser import fix_and_parse_json


class TestParseJson(unittest.TestCase):
    def test_valid_json(self):
        # Test that a valid JSON string is parsed correctly
        json_str = '{"name": "John", "age": 30, "city": "New York"}'
        obj = fix_and_parse_json(json_str)
        self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})

    def test_invalid_json_minor(self):
        # Test that a minorly invalid JSON string can be fixed without gpt
        json_str = '{"name": "John", "age": 30, "city": "New York",}'
        self.assertEqual(
            fix_and_parse_json(json_str, try_to_fix_with_gpt=False),
            {"name": "John", "age": 30, "city": "New York"},
        )

    def test_invalid_json_major_with_gpt(self):
        # Test that a majorly invalid JSON string can be fixed with gpt
        json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
        self.assertEqual(
            fix_and_parse_json(json_str, try_to_fix_with_gpt=True),
            {"name": "John", "age": 30, "city": "New York"},
        )

    def test_invalid_json_major_without_gpt(self):
        # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
        json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
        # Assert that this raises an exception:
        with self.assertRaises(Exception):
            fix_and_parse_json(json_str, try_to_fix_with_gpt=False)

    def test_invalid_json_leading_sentence_with_gpt(self):
        # Test that a JSON blob preceded by a leading sentence is still parsed correctly without gpt
        json_str = """I suggest we start by browsing the repository to find any issues that we can fix.

{
"command": {
"name": "browse_website",
"args":{
"url": "https://github.com/Torantulino/Auto-GPT"
}
},
"thoughts":
{
"text": "I suggest we start browsing the repository to find any issues that we can fix.",
"reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
"plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
"criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
"speak": "I will start browsing the repository to find any issues we can fix."
}
}"""
        good_obj = {
            "command": {
                "name": "browse_website",
                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
            },
            "thoughts": {
                "text": "I suggest we start browsing the repository to find any issues that we can fix.",
                "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
                "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
                "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
                "speak": "I will start browsing the repository to find any issues we can fix.",
            },
        }
        # Assert that the embedded JSON is parsed into the expected object:
        self.assertEqual(
            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
        )

    # NOTE: this redefines the method of the same name above, so only this later
    # definition is collected and run by unittest.
    def test_invalid_json_leading_sentence_with_gpt(self):
        # Test that a JSON blob preceded by a leading sentence is still parsed correctly without gpt
        json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.

{
"command": {
"name": "browse_website",
"args":{
"url": "https://github.com/Torantulino/Auto-GPT"
}
},
"thoughts":
{
"text": "Browsing the repository to identify potential bugs",
"reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
"plan": "- Analyze the repository for potential bugs and areas of improvement",
"criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
"speak": "I am browsing the repository to identify potential bugs."
}
}"""
        good_obj = {
            "command": {
                "name": "browse_website",
                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
            },
            "thoughts": {
                "text": "Browsing the repository to identify potential bugs",
                "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
                "plan": "- Analyze the repository for potential bugs and areas of improvement",
                "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
                "speak": "I am browsing the repository to identify potential bugs.",
            },
        }
        # Assert that the embedded JSON is parsed into the expected object:
        self.assertEqual(
            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
        )


if __name__ == "__main__":
    unittest.main()

tests/unit/test_browse_scrape_links.py: 118 lines (Normal file)
@@ -0,0 +1,118 @@
# Generated by CodiumAI

# Dependencies:
# pip install pytest-mock
import pytest

from scripts.browse import scrape_links

"""
Code Analysis

Objective:
The objective of the 'scrape_links' function is to scrape hyperlinks from a
given URL and return them in a formatted way.

Inputs:
- url: a string representing the URL to be scraped.

Flow:
1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
2. Check if the response contains an HTTP error. If it does, return "error".
3. Parse the HTML content of the response using the BeautifulSoup library.
4. Remove any script and style tags from the parsed HTML.
5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function.
6. Format the extracted hyperlinks using the 'format_hyperlinks' function.
7. Return the formatted hyperlinks.

Outputs:
- A list of formatted hyperlinks.

Additional aspects:
- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP
requests and parse HTML content, respectively.
- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML.
- The 'format_hyperlinks' function is called to format the extracted hyperlinks.
- The function checks for HTTP errors and returns "error" if any are found.
"""


class TestScrapeLinks:
    # Tests that the function returns a list of formatted hyperlinks when
    # provided with a valid url that returns a webpage with hyperlinks.
    def test_valid_url_with_hyperlinks(self):
        url = "https://www.google.com"
        result = scrape_links(url)
        assert len(result) > 0
        assert isinstance(result, list)
        assert isinstance(result[0], str)

    # Tests that the function returns correctly formatted hyperlinks when given a valid url.
    def test_valid_url(self, mocker):
        # Mock the requests.get() function to return a response with sample HTML containing hyperlinks
        mock_response = mocker.Mock()
        mock_response.status_code = 200
        mock_response.text = (
            "<html><body><a href='https://www.google.com'>Google</a></body></html>"
        )
        mocker.patch("requests.get", return_value=mock_response)

        # Call the function with a valid URL
        result = scrape_links("https://www.example.com")

        # Assert that the function returns correctly formatted hyperlinks
        assert result == ["Google (https://www.google.com)"]

    # Tests that the function returns "error" when given an invalid url.
    def test_invalid_url(self, mocker):
        # Mock the requests.get() function to return an HTTP error response
        mock_response = mocker.Mock()
        mock_response.status_code = 404
        mocker.patch("requests.get", return_value=mock_response)

        # Call the function with an invalid URL
        result = scrape_links("https://www.invalidurl.com")

        # Assert that the function returns "error"
        assert "Error:" in result

    # Tests that the function returns an empty list when the html contains no hyperlinks.
    def test_no_hyperlinks(self, mocker):
        # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks
        mock_response = mocker.Mock()
        mock_response.status_code = 200
        mock_response.text = "<html><body><p>No hyperlinks here</p></body></html>"
        mocker.patch("requests.get", return_value=mock_response)

        # Call the function with a URL containing no hyperlinks
        result = scrape_links("https://www.example.com")

        # Assert that the function returns an empty list
        assert result == []

    # Tests that scrape_links() correctly extracts and formats hyperlinks from
    # a sample HTML containing a few hyperlinks.
    def test_scrape_links_with_few_hyperlinks(self, mocker):
        # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks
        mock_response = mocker.Mock()
        mock_response.status_code = 200
        mock_response.text = """
        <html>
          <body>
            <div id="google-link"><a href="https://www.google.com">Google</a></div>
            <div id="github"><a href="https://github.com">GitHub</a></div>
            <div id="CodiumAI"><a href="https://www.codium.ai">CodiumAI</a></div>
          </body>
        </html>
        """
        mocker.patch("requests.get", return_value=mock_response)

        # Call the function being tested
        result = scrape_links("https://www.example.com")

        # Assert that the function returns a list of formatted hyperlinks
        assert isinstance(result, list)
        assert len(result) == 3
        assert result[0] == "Google (https://www.google.com)"
        assert result[1] == "GitHub (https://github.com)"
        assert result[2] == "CodiumAI (https://www.codium.ai)"
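
Editor's sketch, not part of this commit: the Code Analysis block at the top of the file above describes the expected flow of scrape_links. A minimal implementation consistent with that description could look like the following; the user-agent value and the inlined link extraction and formatting (standing in for the extract_hyperlinks/format_hyperlinks helpers named in the analysis) are assumptions, and the real scripts.browse module may differ.

import requests
from bs4 import BeautifulSoup


def scrape_links(url):
    # 1-2. Fetch the page and return an error string on an HTTP error status.
    response = requests.get(url, headers={"User-Agent": "Auto-GPT"})  # header value assumed
    if response.status_code >= 400:
        return "Error: HTTP " + str(response.status_code) + " error"

    # 3-4. Parse the HTML and drop script/style content.
    soup = BeautifulSoup(response.text, "html.parser")
    for tag in soup(["script", "style"]):
        tag.extract()

    # 5-7. Collect the anchors and return them as "text (href)" strings.
    hyperlinks = [(link.text, link["href"]) for link in soup.find_all("a", href=True)]
    return [f"{text} ({href})" for text, href in hyperlinks]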

tests/unit/test_browse_scrape_text.py: 96 lines (Normal file)
@@ -0,0 +1,96 @@
# Generated by CodiumAI

import requests

from autogpt.commands.web_requests import scrape_text

"""
Code Analysis

Objective:
The objective of the "scrape_text" function is to scrape the text content from
a given URL and return it as a string, after removing any unwanted HTML tags and scripts.

Inputs:
- url: a string representing the URL of the webpage to be scraped.

Flow:
1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
2. Check if the response contains an HTTP error. If it does, return an error message.
3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags.
4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup.
5. Split the text into lines and then into chunks, removing any extra whitespace.
6. Join the chunks into a single string with newline characters between them.
7. Return the cleaned text.

Outputs:
- A string representing the cleaned text content of the webpage.

Additional aspects:
- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively.
- The function removes script and style tags from the HTML to avoid including unwanted content in the text output.
- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
"""


class TestScrapeText:
    # Tests that scrape_text() returns the expected text when given a valid URL.
    def test_scrape_text_with_valid_url(self, mocker):
        # Mock the requests.get() method to return a response with expected text
        expected_text = "This is some sample text"
        mock_response = mocker.Mock()
        mock_response.status_code = 200
        mock_response.text = f"<html><body><div><p style='color: blue;'>{expected_text}</p></div></body></html>"
        mocker.patch("requests.get", return_value=mock_response)

        # Call the function with a valid URL and assert that it returns the expected text
        url = "http://www.example.com"
        assert scrape_text(url) == expected_text

    # Tests that the function returns an error message when an invalid or unreachable url is provided.
    def test_invalid_url(self, mocker):
        # Mock the requests.get() method to raise an exception
        mocker.patch("requests.get", side_effect=requests.exceptions.RequestException)

        # Call the function with an invalid URL and assert that it returns an error message
        url = "http://www.invalidurl.com"
        error_message = scrape_text(url)
        assert "Error:" in error_message

    # Tests that the function returns an empty string when the html page contains no text to be scraped.
    def test_no_text(self, mocker):
        # Mock the requests.get() method to return a response with no text
        mock_response = mocker.Mock()
        mock_response.status_code = 200
        mock_response.text = "<html><body></body></html>"
        mocker.patch("requests.get", return_value=mock_response)

        # Call the function with a valid URL and assert that it returns an empty string
        url = "http://www.example.com"
        assert scrape_text(url) == ""

    # Tests that the function returns an error message when the response status code is an http error (>=400).
    def test_http_error(self, mocker):
        # Mock the requests.get() method to return a response with a 404 status code
        mocker.patch("requests.get", return_value=mocker.Mock(status_code=404))

        # Call the function with a URL
        result = scrape_text("https://www.example.com")

        # Check that the function returns an error message
        assert result == "Error: HTTP 404 error"

    # Tests that scrape_text() properly handles HTML tags.
    def test_scrape_text_with_html_tags(self, mocker):
        # Create a mock response object with HTML containing tags
        html = "<html><body><p>This is <b>bold</b> text.</p></body></html>"
        mock_response = mocker.Mock()
        mock_response.status_code = 200
        mock_response.text = html
        mocker.patch("requests.get", return_value=mock_response)

        # Call the function with a URL
        result = scrape_text("https://www.example.com")

        # Check that the function properly handles HTML tags
        assert result == "This is bold text."
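
Editor's sketch, not part of this commit: for comparison, a minimal scrape_text consistent with the Code Analysis block above. The user-agent value and the exact whitespace-splitting rules are assumptions, and the real autogpt.commands.web_requests module may differ.

import requests
from bs4 import BeautifulSoup


def scrape_text(url):
    # 1-2. Fetch the page and return an error message on an HTTP error status.
    response = requests.get(url, headers={"User-Agent": "Auto-GPT"})  # header value assumed
    if response.status_code >= 400:
        return f"Error: HTTP {response.status_code} error"

    # 3. Parse the HTML and remove script/style elements.
    soup = BeautifulSoup(response.text, "html.parser")
    for tag in soup(["script", "style"]):
        tag.extract()

    # 4-7. Extract the text, strip extra whitespace, and rejoin with newlines.
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    return "\n".join(chunk for chunk in chunks if chunk)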

tests/unit/test_commands.py: 18 lines (Normal file)
@@ -0,0 +1,18 @@
import autogpt.agent.agent_manager as agent_manager
from autogpt.app import start_agent, list_agents
import unittest
from unittest.mock import patch, MagicMock


class TestCommands(unittest.TestCase):
    def test_make_agent(self):
        with patch("openai.ChatCompletion.create") as mock:
            obj = MagicMock()
            obj.response.choices[0].messages[0].content = "Test message"
            mock.return_value = obj
            start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
            agents = list_agents()
            self.assertEqual("List of agents:\n0: chat", agents)
            start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
            agents = list_agents()
            self.assertEqual("List of agents:\n0: chat\n1: write", agents)