Compare commits

...

6 Commits

Author                SHA1        Message                                         Date
Swifty                6301f8c34e  Merge branch 'repo-restructure' into fix-path   2024-09-20 14:11:07 +02:00
Reinier van der Leer  f711c057da  unbreak links in README                         2024-09-20 14:09:01 +02:00
Reinier van der Leer  0c6b08d882  fix agbenchmark replace-all                     2024-09-20 14:08:17 +02:00
SwiftyOS              5b08913ab9  fixing path issues                              2024-09-20 14:04:24 +02:00
Reinier van der Leer  815cf8b4ac  fix issue with webdriver-manager                2024-09-20 13:46:57 +02:00
SwiftyOS              3abe821533  Update docker file formatting                   2024-09-20 13:43:51 +02:00
21 changed files with 627 additions and 626 deletions


@@ -155,7 +155,7 @@ jobs:
poetry run agbenchmark --mock
CHANGED=$(git diff --name-only | grep -E '(agclassic/benchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"

.gitignore (vendored): 2 changes

@@ -157,7 +157,7 @@ openai/
CURRENT_BULLETIN.md
# AgBenchmark
agclassic/benchmark/reports/
classic/benchmark/agbenchmark/reports/
# Nodejs
package-lock.json


@@ -97,7 +97,7 @@ repos:
alias: pyright-benchmark
entry: poetry -C classic/benchmark run pyright
args: [-p, benchmark, benchmark]
files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
@@ -122,6 +122,6 @@ repos:
- id: pytest-benchmark
name: Run tests - Benchmark
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false
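Note: both hunks apply the same one-word fix, restoring the agbenchmark/ package directory in the files: regex. A quick illustrative check of what the corrected pattern selects (hypothetical paths; pre-commit matches with re.search semantics):

    import re

    pattern = re.compile(r"^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)")

    assert pattern.search("classic/benchmark/agbenchmark/challenges/data.json")
    assert pattern.search("classic/benchmark/tests/test_example.py")
    assert pattern.search("classic/benchmark/poetry.lock")
    # The mangled prefix from the bad replace-all no longer matches:
    assert not pattern.search("classic/benchmark/agclassic/benchmark/challenges/data.json")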


@@ -55,15 +55,16 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.
**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**
### 🏗️ Forge
**Forge your own agent!** – Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** – Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) –
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge
### 🎯 Benchmark
@@ -71,7 +72,7 @@ This guide will walk you through the process of creating your own agent and usin
<!-- TODO: insert visual demonstrating the benchmark -->
📦 [`agbenchmark`](https://pypi.org/project/agclassic/benchmark/) on Pypi
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
&ensp;|&ensp;
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
@@ -83,7 +84,7 @@ This guide will walk you through the process of creating your own agent and usin
The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend
### ⌨️ CLI


@@ -42,13 +42,13 @@ ENTRYPOINT ["poetry", "run", "autogpt"]
CMD []
# dev build -> include everything
FROM autogpt-base as autogpt-dev
FROM autogpt-base AS autogpt-dev
RUN poetry install --no-cache --no-root \
&& rm -rf $(poetry env info --path)/src
ONBUILD COPY original_autogpt/ ./
# release build -> include bare minimum
FROM autogpt-base as autogpt-release
FROM autogpt-base AS autogpt-release
RUN poetry install --no-cache --no-root --without dev \
&& rm -rf $(poetry env info --path)/src
ONBUILD COPY original_autogpt/ ./autogpt
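Note: the two as -> AS edits are formatting-only. Newer Docker/BuildKit releases emit a FromAsCasing lint warning when the keyword's casing differs from FROM; uppercasing it silences the warning without changing either build stage.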


@@ -9,4 +9,4 @@ exclude =
venv*/,
.venv/,
reports/,
agclassic/benchmark/reports/,
agbenchmark/reports/,

File diff suppressed because one or more lines are too long


@@ -295,7 +295,7 @@ def benchmark_categories_list():
glob_path = os.path.join(
this_dir,
"./classic/benchmark/agclassic/benchmark/challenges/**/[!deprecated]*/data.json",
"./benchmark/agbenchmark/challenges/**/[!deprecated]*/data.json",
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
@@ -340,7 +340,7 @@ def benchmark_tests_list():
glob_path = os.path.join(
this_dir,
"./classic/benchmark/agclassic/benchmark/challenges/**/[!deprecated]*/data.json",
"./benchmark/agbenchmark/challenges/**/[!deprecated]*/data.json",
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
@@ -391,7 +391,7 @@ def benchmark_tests_details(test_name):
glob_path = os.path.join(
this_dir,
"./classic/benchmark/agclassic/benchmark/challenges/**/[!deprecated]*/data.json",
"./benchmark/agbenchmark/challenges/**/[!deprecated]*/data.json",
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
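Note: all three hunks shorten the same glob, and in both the old and new versions [!deprecated] is a character class, not a word exclusion: it matches any single leading character outside the set {d, e, p, r, c, a, t}, which skips deprecated/ but also any other directory starting with one of those letters. A short fnmatch illustration with hypothetical directory names:

    import fnmatch

    print(fnmatch.fnmatch("deprecated", "[!deprecated]*"))  # False
    print(fnmatch.fnmatch("verify", "[!deprecated]*"))      # True ('v' is outside the set)
    print(fnmatch.fnmatch("coding", "[!deprecated]*"))      # False ('c' is in the set)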


@@ -6756,13 +6756,13 @@ wasabi = ">=0.9.1,<1.2.0"
[[package]]
name = "webdriver-manager"
version = "4.0.1"
version = "4.0.2"
description = "Library provides the way to automatically manage drivers for different browsers"
optional = false
python-versions = ">=3.7"
files = [
{file = "webdriver_manager-4.0.1-py2.py3-none-any.whl", hash = "sha256:d7970052295bb9cda2c1a24cf0b872dd2c41ababcc78f7b6b8dc37a41e979a7e"},
{file = "webdriver_manager-4.0.1.tar.gz", hash = "sha256:25ec177c6a2ce9c02fb8046f1b2732701a9418d6a977967bb065d840a3175d87"},
{file = "webdriver_manager-4.0.2-py2.py3-none-any.whl", hash = "sha256:75908d92ecc45ff2b9953614459c633db8f9aa1ff30181cefe8696e312908129"},
{file = "webdriver_manager-4.0.2.tar.gz", hash = "sha256:efedf428f92fd6d5c924a0d054e6d1322dd77aab790e834ee767af392b35590f"},
]
[package.dependencies]
@@ -7084,4 +7084,4 @@ benchmark = ["agbenchmark"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "acca6b5d67a64527f1d19f61e20a89eb228e066a80cd7701fd59cf19bb267eb8"
content-hash = "98943987dbafb450c36d64f462032014183db6b5047f44945bfb63557f64204c"


@@ -54,7 +54,7 @@ tiktoken = ">=0.7.0,<1.0.0"
toml = "^0.10.2"
uvicorn = { extras = ["standard"], version = ">=0.23.2,<1" }
watchdog = "4.0.0"
webdriver-manager = "^4.0.1"
webdriver-manager = "^4.0.2"
[tool.poetry.extras]
benchmark = ["agbenchmark"]

File diff suppressed because one or more lines are too long


@@ -2,51 +2,51 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
}
],
"nodes": [
@@ -86,7 +86,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -125,7 +125,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -162,7 +162,7 @@
"name": "TestAnswerQuestionCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCsv",
"shape": "dot"
},
@@ -200,7 +200,7 @@
"name": "TestAnswerQuestionSmallCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"label": "AnswerQuestionSmallCsv",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestAnswerQuestionCombineCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCombineCsv",
"shape": "dot"
},
@@ -277,7 +277,7 @@
"name": "TestCombineCsv",
"task": "The csvs 'file1.csv' and 'file2.csv' both have a column 'ID'. Combine these 2 csvs using the 'ID' column. Sort the rows by ID in ascending order and the columns alphabetically. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"label": "CombineCsv",
"shape": "dot"
},
@@ -314,7 +314,7 @@
"name": "TestLabelCsv",
"task": "The csv 'input.csv' has many items. create a 'Color' column for these items and classify them as either 'blue', 'green', or 'yellow' depending on what the most likely color is. Preserve the order of the rows. The color column should be the second column. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"label": "LabelCsv",
"shape": "dot"
},
@@ -352,7 +352,7 @@
"name": "TestSortCsv",
"task": "Sort the input.csv by the 'timestamp' column and write the new csv in the output.csv file. The order of the columns should be preserved."
},
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"label": "SortCsv",
"shape": "dot"
}
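Note: these report graph.json files encode a benchmark run's challenge-dependency graph, with node and edge IDs keyed by test file path. As the commit message "fix agbenchmark replace-all" suggests, an earlier find-and-replace on benchmark paths also mangled the agbenchmark package name inside these IDs; the hunks above (and the near-identical report files that follow) restore it. A hypothetical sketch of that mechanical rewrite:

    def fix_graph_ids(path: str) -> None:
        # Pure string replacement: the IDs embed the old, mangled module path.
        with open(path, encoding="utf-8") as f:
            text = f.read()
        with open(path, "w", encoding="utf-8") as f:
            f.write(text.replace("agclassic/benchmark/", "agbenchmark/"))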

File diff suppressed because one or more lines are too long


@@ -2,45 +2,45 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
}
],
"nodes": [
@@ -80,7 +80,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -119,7 +119,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -156,7 +156,7 @@
"name": "TestBasicRetrieval",
"task": "Write the price of the book in this url 'http://books.toscrape.com/catalogue/meditations_33/index.html' to a .txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"label": "BasicRetrieval",
"shape": "dot"
},
@@ -199,7 +199,7 @@
"name": "TestSearch",
"task": "Open 'https://silennaihin.com/random/plain.html' and paste all of the text on the page in a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"label": "Search",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestTestGetInformation",
"task": "Write the twitter handle of the two hosts of Latent Space to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"label": "TestGetInformation",
"shape": "dot"
},
@@ -291,7 +291,7 @@
"name": "TestRevenueRetrieval2",
"task": "Write tesla's revenue every year since its creation into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"label": "RevenueRetrieval2",
"shape": "dot"
},
@@ -328,7 +328,7 @@
"name": "TestRevenueRetrieval",
"task": "Write tesla's exact revenue in 2022 into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"label": "RevenueRetrieval",
"shape": "dot"
},
@@ -367,7 +367,7 @@
"name": "TestSynthesizeInfo",
"task": "Create a brief report or summary highlighting how one or more companies from companies.txt are addressing or capitalizing on challenges or trends from challenges.txt. Write a file called output.txt."
},
"id": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"label": "SynthesizeInfo",
"shape": "dot"
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -2,51 +2,51 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
}
],
"nodes": [
@@ -86,7 +86,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -125,7 +125,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -162,7 +162,7 @@
"name": "TestAnswerQuestionCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCsv",
"shape": "dot"
},
@@ -200,7 +200,7 @@
"name": "TestAnswerQuestionSmallCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
"label": "AnswerQuestionSmallCsv",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestAnswerQuestionCombineCsv",
"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
"label": "AnswerQuestionCombineCsv",
"shape": "dot"
},
@@ -277,7 +277,7 @@
"name": "TestCombineCsv",
"task": "The csvs 'file1.csv' and 'file2.csv' both have a column 'ID'. Combine these 2 csvs using the 'ID' column. Sort the rows by ID in ascending order and the columns alphabetically. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
"label": "CombineCsv",
"shape": "dot"
},
@@ -314,7 +314,7 @@
"name": "TestLabelCsv",
"task": "The csv 'input.csv' has many items. create a 'Color' column for these items and classify them as either 'blue', 'green', or 'yellow' depending on what the most likely color is. Preserve the order of the rows. The color column should be the second column. Write the output in output.csv"
},
"id": "agclassic/benchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"label": "LabelCsv",
"shape": "dot"
},
@@ -352,7 +352,7 @@
"name": "TestSortCsv",
"task": "Sort the input.csv by the 'timestamp' column and write the new csv in the output.csv file. The order of the columns should be preserved."
},
"id": "agclassic/benchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"label": "SortCsv",
"shape": "dot"
}

File diff suppressed because one or more lines are too long


@@ -2,45 +2,45 @@
"edges": [
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]"
},
{
"arrows": "to",
"from": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]"
}
],
"nodes": [
@@ -80,7 +80,7 @@
"name": "TestReadFile",
"task": "Read the file called file_to_read.txt and write its content to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"label": "ReadFile",
"shape": "dot"
},
@@ -119,7 +119,7 @@
"name": "TestWriteFile",
"task": "Write the word 'Washington' to a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]",
"label": "WriteFile",
"shape": "dot"
},
@@ -156,7 +156,7 @@
"name": "TestBasicRetrieval",
"task": "Write the price of the book in this url 'http://books.toscrape.com/catalogue/meditations_33/index.html' to a .txt file."
},
"id": "agclassic/benchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
"label": "BasicRetrieval",
"shape": "dot"
},
@@ -199,7 +199,7 @@
"name": "TestSearch",
"task": "Open 'https://silennaihin.com/random/plain.html' and paste all of the text on the page in a .txt file"
},
"id": "agclassic/benchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
"label": "Search",
"shape": "dot"
},
@@ -239,7 +239,7 @@
"name": "TestTestGetInformation",
"task": "Write the twitter handle of the two hosts of Latent Space to a file called output.txt"
},
"id": "agclassic/benchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"label": "TestGetInformation",
"shape": "dot"
},
@@ -291,7 +291,7 @@
"name": "TestRevenueRetrieval2",
"task": "Write tesla's revenue every year since its creation into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"label": "RevenueRetrieval2",
"shape": "dot"
},
@@ -328,7 +328,7 @@
"name": "TestRevenueRetrieval",
"task": "Write tesla's exact revenue in 2022 into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 million)."
},
"id": "agclassic/benchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"label": "RevenueRetrieval",
"shape": "dot"
},
@@ -367,7 +367,7 @@
"name": "TestSynthesizeInfo",
"task": "Create a brief report or summary highlighting how one or more companies from companies.txt are addressing or capitalizing on challenges or trends from challenges.txt. Write a file called output.txt."
},
"id": "agclassic/benchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSynthesizeInfo::test_method[challenge_data0]",
"label": "SynthesizeInfo",
"shape": "dot"
}

File diff suppressed because one or more lines are too long


@@ -348,10 +348,10 @@ tiktoken = ">=0.7.0,<1.0.0"
toml = "^0.10.2"
uvicorn = {version = ">=0.23.2,<1", extras = ["standard"]}
watchdog = "4.0.0"
webdriver-manager = "^4.0.1"
webdriver-manager = "^4.0.2"
[package.extras]
benchmark = ["agbenchmark @ file:///Users/czerwinski/Projects/AutoGPT/benchmark"]
benchmark = ["agbenchmark @ file:///home/reinier/code/agpt/AutoGPT/classic/benchmark"]
[package.source]
type = "directory"
@@ -6416,13 +6416,13 @@ wasabi = ">=0.9.1,<1.2.0"
[[package]]
name = "webdriver-manager"
version = "4.0.1"
version = "4.0.2"
description = "Library provides the way to automatically manage drivers for different browsers"
optional = false
python-versions = ">=3.7"
files = [
{file = "webdriver_manager-4.0.1-py2.py3-none-any.whl", hash = "sha256:d7970052295bb9cda2c1a24cf0b872dd2c41ababcc78f7b6b8dc37a41e979a7e"},
{file = "webdriver_manager-4.0.1.tar.gz", hash = "sha256:25ec177c6a2ce9c02fb8046f1b2732701a9418d6a977967bb065d840a3175d87"},
{file = "webdriver_manager-4.0.2-py2.py3-none-any.whl", hash = "sha256:75908d92ecc45ff2b9953614459c633db8f9aa1ff30181cefe8696e312908129"},
{file = "webdriver_manager-4.0.2.tar.gz", hash = "sha256:efedf428f92fd6d5c924a0d054e6d1322dd77aab790e834ee767af392b35590f"},
]
[package.dependencies]