fixing path issues

SwiftyOS
2024-09-20 14:04:24 +02:00
parent 815cf8b4ac
commit 5b08913ab9
17 changed files with 610 additions and 610 deletions


@@ -155,7 +155,7 @@ jobs:
poetry run agbenchmark --mock
-CHANGED=$(git diff --name-only | grep -E '(agclassic/benchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
+CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"

.gitignore (vendored, 2 changes)

@@ -157,7 +157,7 @@ openai/
CURRENT_BULLETIN.md
# AgBenchmark
-agclassic/benchmark/reports/
+agbenchmark/reports/
# Nodejs
package-lock.json


@@ -97,7 +97,7 @@ repos:
alias: pyright-benchmark
entry: poetry -C classic/benchmark run pyright
args: [-p, benchmark, benchmark]
-files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
+files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
@@ -122,6 +122,6 @@ repos:
- id: pytest-benchmark
name: Run tests - Benchmark
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
-files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
+files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false


@@ -71,7 +71,7 @@ This guide will walk you through the process of creating your own agent and usin
<!-- TODO: insert visual demonstrating the benchmark -->
-📦 [`agbenchmark`](https://pypi.org/project/agclassic/benchmark/) on Pypi
+📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
&ensp;|&ensp;
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark


@@ -9,4 +9,4 @@ exclude =
venv*/,
.venv/,
reports/,
-agclassic/benchmark/reports/,
+agbenchmark/reports/,

File diff suppressed because one or more lines are too long


@@ -295,7 +295,7 @@ def benchmark_categories_list():
glob_path = os.path.join(
this_dir,
"./classic/benchmark/agclassic/benchmark/challenges/**/[!deprecated]*/data.json",
"./classic/benchmark/agbenchmark/challenges/**/[!deprecated]*/data.json",
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
@@ -340,7 +340,7 @@ def benchmark_tests_list():
glob_path = os.path.join(
this_dir,
"./classic/benchmark/agclassic/benchmark/challenges/**/[!deprecated]*/data.json",
"./classic/benchmark/agbenchmark/challenges/**/[!deprecated]*/data.json",
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
@@ -391,7 +391,7 @@ def benchmark_tests_details(test_name):
glob_path = os.path.join(
this_dir,
"./classic/benchmark/agclassic/benchmark/challenges/**/[!deprecated]*/data.json",
"./classic/benchmark/agbenchmark/challenges/**/[!deprecated]*/data.json",
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
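
All three handler functions build the same glob, so the path fix has to land in each of them. A self-contained sketch of the corrected lookup (the standalone-script framing is an assumption; in the source these lines live inside the handler functions above). One caveat worth a comment: `[!deprecated]*` is fnmatch character-class syntax, not a literal word exclusion:

import glob
import os

# Sketch of the corrected challenge lookup, assuming this file sits at the
# repository root.
this_dir = os.path.dirname(os.path.abspath(__file__))
glob_path = os.path.join(
    this_dir,
    "./classic/benchmark/agbenchmark/challenges/**/[!deprecated]*/data.json",
)
# Note: [!deprecated]* matches directory names whose FIRST character is none
# of d, e, p, r, c, a, t. That skips 'deprecated', but would also skip any
# sibling directory starting with one of those letters.
for data_file in glob.glob(glob_path, recursive=True):
    print(data_file)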

File diff suppressed because one or more lines are too long


@@ -2,51 +2,51 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
}
],
"nodes": [
@@ -86,7 +86,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -125,7 +125,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -162,7 +162,7 @@
"name": "TestAnswerQuestionCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCsv",
"shape": "dot"
},
@@ -200,7 +200,7 @@
"name": "TestAnswerQuestionSmallCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"label": "AnswerQuestionSmallCsv",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestAnswerQuestionCombineCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCombineCsv",
"shape": "dot"
},
@@ -277,7 +277,7 @@
"name": "TestCombineCsv",
"task": "The csvs 'file1.csv' and 'file2.csv' both have a column 'ID'. Combine these 2 csvs using the 'ID' column. Sort the rows by ID in ascending order and the columns alphabetically. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"label": "CombineCsv",
"shape": "dot"
},
@@ -314,7 +314,7 @@
"name": "TestLabelCsv",
"task": "The csv 'input.csv' has many items. create a 'Color' column for these items and classify them as either 'blue', 'green', or 'yellow' depending on what the most likely color is. Preserve the order of the rows. The color column should be the second column. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"label": "LabelCsv",
"shape": "dot"
},
@@ -352,7 +352,7 @@
"name": "TestSortCsv",
"task": "Sort the input.csv by the 'timestamp' column and write the new csv in the output.csv file. The order of the columns should be preserved."
},
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"label": "SortCsv",
"shape": "dot"
}
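
Since every `from`, `id`, and `to` field in these report graphs embeds the test path, a quick sanity check that the rename is complete can be useful. A hedged helper, not part of the commit, that loads one of the graph files above and asserts no stale prefix survives:

import json

def assert_paths_renamed(graph_file: str) -> None:
    # Collect every identifier that embeds a generate_test.py path.
    with open(graph_file) as f:
        graph = json.load(f)
    ids = [node["id"] for node in graph["nodes"]]
    ids += [edge[key] for edge in graph["edges"] for key in ("from", "id", "to")]
    stale = [i for i in ids if "agclassic/benchmark/" in i]
    assert not stale, f"stale paths remain: {stale}"

assert_paths_renamed("graph.json")  # illustrative filename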

File diff suppressed because one or more lines are too long


@@ -2,45 +2,45 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
}
],
"nodes": [
@@ -80,7 +80,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -119,7 +119,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -156,7 +156,7 @@
"name": "TestBasicRetrieval",
"task": "Write the price of the book in this url 'http://books.toscrape.com/catalogue/meditations_33/index.html' to a .txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"label": "BasicRetrieval",
"shape": "dot"
},
@@ -199,7 +199,7 @@
"name": "TestSearch",
"task": "Open 'https://silennaihin.com/random/plain.html' and paste all of the text on the page in a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"label": "Search",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestTestGetInformation",
"task": "Write the twitter handle of the two hosts of Latent Space to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"label": "TestGetInformation",
"shape": "dot"
},
@@ -291,7 +291,7 @@
"name": "TestRevenueRetrieval2",
"task": "Write tesla's revenue every year since its creation into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"label": "RevenueRetrieval2",
"shape": "dot"
},
@@ -328,7 +328,7 @@
"name": "TestRevenueRetrieval",
"task": "Write tesla's exact revenue in 2022 into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"label": "RevenueRetrieval",
"shape": "dot"
},
@@ -367,7 +367,7 @@
"name": "TestSynthesizeInfo",
"task": "Create a brief report or summary highlighting how one or more companies from companies.txt are addressing or capitalizing on challenges or trends from challenges.txt. Write a file called output.txt."
},
"id": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"label": "SynthesizeInfo",
"shape": "dot"
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -2,51 +2,51 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
}
],
"nodes": [
@@ -86,7 +86,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -125,7 +125,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -162,7 +162,7 @@
"name": "TestAnswerQuestionCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCsv",
"shape": "dot"
},
@@ -200,7 +200,7 @@
"name": "TestAnswerQuestionSmallCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"label": "AnswerQuestionSmallCsv",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestAnswerQuestionCombineCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCombineCsv",
"shape": "dot"
},
@@ -277,7 +277,7 @@
"name": "TestCombineCsv",
"task": "The csvs 'file1.csv' and 'file2.csv' both have a column 'ID'. Combine these 2 csvs using the 'ID' column. Sort the rows by ID in ascending order and the columns alphabetically. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"label": "CombineCsv",
"shape": "dot"
},
@@ -314,7 +314,7 @@
"name": "TestLabelCsv",
"task": "The csv 'input.csv' has many items. create a 'Color' column for these items and classify them as either 'blue', 'green', or 'yellow' depending on what the most likely color is. Preserve the order of the rows. The color column should be the second column. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"label": "LabelCsv",
"shape": "dot"
},
@@ -352,7 +352,7 @@
"name": "TestSortCsv",
"task": "Sort the input.csv by the 'timestamp' column and write the new csv in the output.csv file. The order of the columns should be preserved."
},
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"label": "SortCsv",
"shape": "dot"
}

File diff suppressed because one or more lines are too long


@@ -2,45 +2,45 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
}
],
"nodes": [
@@ -80,7 +80,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -119,7 +119,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -156,7 +156,7 @@
"name": "TestBasicRetrieval",
"task": "Write the price of the book in this url 'http://books.toscrape.com/catalogue/meditations_33/index.html' to a .txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"label": "BasicRetrieval",
"shape": "dot"
},
@@ -199,7 +199,7 @@
"name": "TestSearch",
"task": "Open 'https://silennaihin.com/random/plain.html' and paste all of the text on the page in a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"label": "Search",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestTestGetInformation",
"task": "Write the twitter handle of the two hosts of Latent Space to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"label": "TestGetInformation",
"shape": "dot"
},
@@ -291,7 +291,7 @@
"name": "TestRevenueRetrieval2",
"task": "Write tesla's revenue every year since its creation into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"label": "RevenueRetrieval2",
"shape": "dot"
},
@@ -328,7 +328,7 @@
"name": "TestRevenueRetrieval",
"task": "Write tesla's exact revenue in 2022 into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"label": "RevenueRetrieval",
"shape": "dot"
},
@@ -367,7 +367,7 @@
"name": "TestSynthesizeInfo",
"task": "Create a brief report or summary highlighting how one or more companies from companies.txt are addressing or capitalizing on challenges or trends from challenges.txt. Write a file called output.txt."
},
"id": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"label": "SynthesizeInfo",
"shape": "dot"
}

File diff suppressed because one or more lines are too long