Merge branch 'master' into zamilmajdy/code-validation

Author: Nicholas Tindle
Date: 2024-06-13 17:33:44 -05:00
Committed by: GitHub
31 changed files with 1905 additions and 1783 deletions

.github/workflows/autogpt-server-ci.yml (vendored, new file)

@@ -0,0 +1,268 @@
name: AutoGPT Server CI
on:
push:
branches: [master, development, ci-test*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"
- "!autogpt/tests/vcr_cassettes"
pull_request:
branches: [master, development, release-*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"
- "!autogpt/tests/vcr_cassettes"
concurrency:
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: rnd/autogpt_server
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: "."
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
test
env:
CI: true
PLAIN_OUTPUT: True
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: autogpt-server,${{ runner.os }}
build:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
- name: install rpm
if: matrix.platform-os == 'ubuntu'
run: sudo apt-get install -y alien fakeroot rpm
- name: Build distribution
run: |
case "${{ matrix.platform-os }}" in
"macos" | "macos-arm64")
${MAC_COMMAND}
;;
"windows")
${WINDOWS_COMMAND}
;;
*)
${LINUX_COMMAND}
;;
esac
env:
MAC_COMMAND: "poetry run poe dist_dmg"
WINDOWS_COMMAND: "poetry run poe dist_msi"
LINUX_COMMAND: "poetry run poe dist_appimage"
# TODO: break this into separate steps, each with its own name that matches the file
- name: Upload App artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-app-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.app
- name: Upload dmg artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-dmg-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/AutoGPTServer.dmg
- name: Upload msi artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-msi-${{ matrix.platform-os }}
path: D:\a\AutoGPT\AutoGPT\rnd\autogpt_server\dist\*.msi
- name: Upload deb artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-deb-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.deb
- name: Upload rpm artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-rpm-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.rpm
- name: Upload tar.gz artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-tar.gz-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.tar.gz
- name: Upload zip artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-zip-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.zip
- name: Upload pkg artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-pkg-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.pkg
- name: Upload AppImage artifact
uses: actions/upload-artifact@v4
with:
name: autogptserver-AppImage-${{ matrix.platform-os }}
path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.AppImage


@@ -4,7 +4,15 @@
[![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT)  
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
**AutoGPT** is the vision of the power of AI accessible to everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters:
**AutoGPT** is a generalist LLM-based AI agent that can autonomously accomplish minor tasks.
**Examples**:
- Look up and summarize this research paper
- Write marketing copy for food supplements
- Write a blog post detailing the news in AI
Our mission is to provide the tools, so that you can focus on what matters:
- 🏗️ **Building** - Lay the foundation for something amazing.
- 🧪 **Testing** - Fine-tune your agent to perfection.


@@ -1,367 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 28,
"id": "15386da8",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv()\n",
"\n",
"def llm_eval(evaluation: str) -> float:\n",
" openai_client = OpenAI()\n",
" answer = openai_client.chat.completions.create(\n",
" model=\"gpt-4\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": evaluation},\n",
" ],\n",
" )\n",
" return answer.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "e9d1c3a6",
"metadata": {},
"outputs": [],
"source": [
"REFERENCE_PROMPT = \"\"\"Ignore previous directions. You are now an expert at evaluating how close machine generated responses are to human answers. You essentially act as a hyper advanced BLEU score.\n",
"In order to score the machine generated response you will {scoring}. Make sure to factor in the distance to the ideal response into your thinking, deliberation, and final result regarding scoring. Return nothing but a float score.\n",
"\n",
"Here is the given task for you to evaluate:\n",
"{task}\n",
"\n",
"Here is the ideal response you're comparing to based on the task:\n",
"{answer}\n",
"\n",
"Here are some examples of how to score a machine generated response compared to the above ideal response:\n",
"{examples}\n",
"\n",
"Here is the current machine generated response to the task that you need to evaluate:\n",
"{response}\n",
"\n",
"Remember to always end your response with nothing but a float score.\n",
"Float score:\n",
"\"\"\"\n",
"\n",
"RUBRIC_PROMPT = \"\"\"Ignore previous directions. You are now an expert at evaluating machine generated responses to given tasks.\n",
"In order to score the generated texts you will {scoring}. Make sure to factor in rubric into your thinking, deliberation, and final result regarding scoring. Return nothing but a float score.\n",
"\n",
"Here is the given task for you to evaluate:\n",
"{task}\n",
"\n",
"Use the below rubric to guide your thinking about scoring:\n",
"{answer}\n",
"\n",
"Here are some examples of how to score a machine generated response based on the above rubric.\n",
"{examples}\n",
"\n",
"Here is the current machine generated response to the task that you need to evaluate:\n",
"{response}\n",
"\n",
"Remember to always end your response with nothing but a float score.\n",
"Float score:\n",
"\"\"\"\n",
"\n",
"QUESTION_PROMPT = \"\"\"Ignore previous directions. You are now an expert at evaluating machine generated responses to given tasks.\n",
"In order to score the generated texts you will {scoring}. Make sure to think about whether the generated response answers the question well in order to score accurately. Return nothing but a float score.\n",
"\n",
"Here is the given task:\n",
"{task}\n",
"\n",
"Here is a question that checks if the task was completed correctly:\n",
"{answer}\n",
"\n",
"Here are some examples of how to score a machine generated response based on task-response pairs.\n",
"{examples}\n",
"\n",
"Here is the current machine generated response to the task that you need to evaluate:\n",
"{response}\n",
"\n",
"Remember to always end your response with nothing but a float score.\n",
"Float score:\n",
"\"\"\"\n",
"\n",
"CUSTOM_PROMPT = \"\"\"{custom}\n",
"{scoring}\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "8ff9cd33",
"metadata": {},
"outputs": [],
"source": [
"task = \"As an agent powered by a large language model, you can execute any digital action. Your task is to find the cheapest flight to San Francisco from Toronto. Create a step by step plan for yourself to execute this task and output this plan into a .txt file.\"\n",
"\n",
"percentage = \"assign a float score that will represent a percentage out of 100. Use decimal points to be even more accurate. 0 represents the worst possible generation, while 100 represents the ideal generation\"\n",
"scale = \"assign an integer score from a scale of 1-10. 1 represents a really bad generation, while 10 represents an ideal generation\"\n",
"binary = \"assign a binary score of either 0 or 1. 0 represents a failure, while 1 represents a success\"\n",
"\n",
"reference_answer = \"\"\"1. Search 'Toronto to San Francisco flights' on the internet\n",
"2. Click on the first link that is a flight aggregator such as SkyScanner or Google Flights\n",
"3. Select the dates that you want to travel\n",
"3. Click on the 'one way' option and click search\n",
"4. Search through all of the given options and select the cheapest flight\"\"\"\n",
"\n",
"rubric_answer = \"\"\"- Reasonable: The plan's steps should be arranged in a logical sequence. Each step should naturally follow from the one before it, leading coherently towards the desired outcome.\n",
"- Atomic: Each step of the plan should be broken down into the smallest possible units of action. This level of detail allows for better tracking and control, as well as easier troubleshooting and adjustment of the plan if necessary.\n",
"- Relevant: Each step should directly contribute to achieving the end goal of the plan. Irrelevant steps can waste resources and detract from the main objectives.\n",
"- Timely: The plan should include a realistic timeline for each step, and not include any redundant steps.\"\"\"\n",
"\n",
"question_answer = \"Will this plan find the cheapest flight to San Francisco from Toronto consistently?\""
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "d0bfdce5",
"metadata": {},
"outputs": [],
"source": [
"FEW_SHOT_REFERENCE_EXAMPLES = \"\"\"EXAMPLE 1\n",
"Plan: 'Define the Parameters:\n",
"\n",
"Identify the potential range of dates for the journey. Ensure flexibility to capitalize on the best deals.\n",
"Research Airlines:\n",
"\n",
"Compile a list of airlines that operate flights between Toronto and San Francisco. This can be accomplished by checking each airline's website or using an airline directory.\n",
"Use Comparison Tools:\n",
"\n",
"Use flight comparison websites (such as Skyscanner, Expedia, Google Flights, etc.) to compare flight prices across different airlines. These tools aggregate flight data and can present the cheapest options available.\n",
"Set Price Alerts:\n",
"\n",
"If the trip is not immediate, set price alerts on these comparison websites. They will notify you when there is a significant drop in flight prices on the selected route.\n",
"Check Airlines' Official Websites:\n",
"\n",
"After identifying potential flights via comparison tools, visit the airlines' official websites. Sometimes airlines offer deals or discounts on their own websites that are not reflected on comparison sites.\n",
"Consider Nearby Airports:\n",
"\n",
"Check flight prices to and from airports in the vicinity of both Toronto and San Francisco. Sometimes less busy airports can offer cheaper flights.\n",
"Evaluate Cost Effectiveness:\n",
"\n",
"Consider factors such as baggage fees, meal costs, and transportation to and from the airport when evaluating the total cost of the flight. The cheapest ticket price does not necessarily mean the least expensive journey overall.\n",
"Book the Flight:\n",
"\n",
"Once the cheapest and most convenient flight has been identified, proceed to booking. Double-check the flight details before finalizing the booking.\n",
"Monitor Flight Details:\n",
"\n",
"After booking, keep an eye on flight status, gate information, and any potential changes to the flight schedule.'\n",
"Returned score: 82.7\n",
"Internal rationale: The plan is solid, however the plan goes to extreme lengths to make things cheap, sacrificing time and simplicity. The task just asks for a vague definition of booking a flight. There are some redundant steps.\n",
"\n",
"EXAMPLE 2\n",
"Plan: 'Determine the Travel Dates and Flexibility: Decide on the dates you want to travel to San Francisco and check if you have any flexibility in your travel schedule. Being flexible with your travel dates can often lead to finding cheaper flights.\n",
"\n",
"Use Flight Search Engines: Start by using popular flight search engines like Google Flights, Skyscanner, Kayak, or Expedia. These platforms allow you to compare prices from various airlines and find the most affordable options.\n",
"\n",
"Set Up Fare Alerts: If your travel dates are flexible, consider setting up fare alerts on the flight search engines. These alerts will notify you when the prices drop for the specified route.\n",
"\n",
"Check Nearby Airports: In both Toronto and San Francisco, there might be multiple airports. Check flights departing from nearby airports as they may offer better deals.\n",
"\n",
"Consider Layovers: Non-stop flights are usually more convenient but can be more expensive. Look for flights with one or more layovers as they may offer cost savings.\n",
"\n",
"Check Airlines' Official Websites: Once you find a potentially cheap flight on a search engine, verify the price directly on the airline's official website. Sometimes, booking directly with the airline can be cheaper due to exclusive deals and promotions.\n",
"\n",
"Use Incognito/Private Browsing Mode: Flight prices can sometimes increase if the website detects repeated searches for the same route. To avoid this, use the incognito or private browsing mode in your web browser.\n",
"\n",
"Consider Budget Airlines: Check if there are any budget airlines flying between Toronto and San Francisco. They often offer lower fares, but be mindful of additional fees for baggage and other services.\n",
"\n",
"Check for Deals and Promo Codes: Look for any ongoing deals or promo codes that can help you save on your flight booking. Airlines and travel websites occasionally offer special discounts.\n",
"\n",
"Be Flexible with Departure and Arrival Times: If possible, consider flying during off-peak hours or mid-week, as flights during these times can be less expensive.\n",
"\n",
"Factor in Total Costs: While searching for cheap flights, don't forget to consider other expenses like baggage fees, seat selection, and additional amenities. Some budget airlines might have hidden costs that could make the overall trip more expensive.\n",
"\n",
"Book Early: Flight prices tend to rise as the departure date approaches. Once you find a good deal that suits your preferences, don't wait too long to book your flight.'\n",
"Returned score: 74.9\n",
"Internal rationale: The individual components of this plan are better than the one previous. But this plan doesn't follow logical steps to completion, and is just more general advice.\n",
"\n",
"EXAMPLE 3\n",
"Plan: 'Search online for cheap flights.\n",
"Check different dates.\n",
"Look at nearby airports.\n",
"Consider layovers.\n",
"Try budget airlines.\n",
"Book early if you find a good deal.'\n",
"Returned score: 42.0\n",
"Internal rationale: This plan is too vague and does not provide enough detail to be useful.\n",
"\n",
"\"\"\"\n",
"FEW_SHOT_RUBRIC_EXAMPLES = \"\"\n",
"FEW_SHOT_QUESTION_EXAMPLES = \"\""
]
},
{
"cell_type": "code",
"execution_count": 75,
"id": "3de1d6d4",
"metadata": {},
"outputs": [],
"source": [
"response1=\"\"\"Define the Parameters:\n",
"\n",
"Identify the potential range of dates for the journey. Ensure flexibility to capitalize on the best deals.\n",
"Research Airlines:\n",
"\n",
"Compile a list of airlines that operate flights between Toronto and San Francisco. This can be accomplished by checking each airline's website or using an airline directory.\n",
"Use Comparison Tools:\n",
"\n",
"Use flight comparison websites (such as Skyscanner, Expedia, Google Flights, etc.) to compare flight prices across different airlines. These tools aggregate flight data and can present the cheapest options available.\n",
"Set Price Alerts:\n",
"\n",
"If the trip is not immediate, set price alerts on these comparison websites. They will notify you when there is a significant drop in flight prices on the selected route.\n",
"Check Airlines' Official Websites:\n",
"\n",
"After identifying potential flights via comparison tools, visit the airlines' official websites. Sometimes airlines offer deals or discounts on their own websites that are not reflected on comparison sites.\n",
"Consider Nearby Airports:\n",
"\n",
"Check flight prices to and from airports in the vicinity of both Toronto and San Francisco. Sometimes less busy airports can offer cheaper flights.\n",
"Evaluate Cost Effectiveness:\n",
"\n",
"Consider factors such as baggage fees, meal costs, and transportation to and from the airport when evaluating the total cost of the flight. The cheapest ticket price does not necessarily mean the least expensive journey overall.\n",
"Book the Flight:\n",
"\n",
"Once the cheapest and most convenient flight has been identified, proceed to booking. Double-check the flight details before finalizing the booking.\n",
"Monitor Flight Details:\n",
"\n",
"After booking, keep an eye on flight status, gate information, and any potential changes to the flight schedule.\"\"\"\n",
"\n",
"response2=\"\"\"Determine the Travel Dates and Flexibility: Decide on the dates you want to travel to San Francisco and check if you have any flexibility in your travel schedule. Being flexible with your travel dates can often lead to finding cheaper flights.\n",
"\n",
"Use Flight Search Engines: Start by using popular flight search engines like Google Flights, Skyscanner, Kayak, or Expedia. These platforms allow you to compare prices from various airlines and find the most affordable options.\n",
"\n",
"Set Up Fare Alerts: If your travel dates are flexible, consider setting up fare alerts on the flight search engines. These alerts will notify you when the prices drop for the specified route.\n",
"\n",
"Check Nearby Airports: In both Toronto and San Francisco, there might be multiple airports. Check flights departing from nearby airports as they may offer better deals.\n",
"\n",
"Consider Layovers: Non-stop flights are usually more convenient but can be more expensive. Look for flights with one or more layovers as they may offer cost savings.\n",
"\n",
"Check Airlines' Official Websites: Once you find a potentially cheap flight on a search engine, verify the price directly on the airline's official website. Sometimes, booking directly with the airline can be cheaper due to exclusive deals and promotions.\n",
"\n",
"Use Incognito/Private Browsing Mode: Flight prices can sometimes increase if the website detects repeated searches for the same route. To avoid this, use the incognito or private browsing mode in your web browser.\n",
"\n",
"Consider Budget Airlines: Check if there are any budget airlines flying between Toronto and San Francisco. They often offer lower fares, but be mindful of additional fees for baggage and other services.\n",
"\n",
"Check for Deals and Promo Codes: Look for any ongoing deals or promo codes that can help you save on your flight booking. Airlines and travel websites occasionally offer special discounts.\n",
"\n",
"Be Flexible with Departure and Arrival Times: If possible, consider flying during off-peak hours or mid-week, as flights during these times can be less expensive.\n",
"\n",
"Factor in Total Costs: While searching for cheap flights, don't forget to consider other expenses like baggage fees, seat selection, and additional amenities. Some budget airlines might have hidden costs that could make the overall trip more expensive.\n",
"\n",
"Book Early: Flight prices tend to rise as the departure date approaches. Once you find a good deal that suits your preferences, don't wait too long to book your flight.\"\"\"\n",
"\n",
"response3 = \"\"\"Search online for cheap flights.\n",
"Check different dates.\n",
"Look at nearby airports.\n",
"Consider layovers.\n",
"Try budget airlines.\n",
"Book early if you find a good deal.\"\"\"\n",
"\n",
"ideal_response = \"\"\"1. Search 'Toronto to San Francisco flights' on the internet\n",
"2. Click on the first link that is a flight aggregator such as SkyScanner or Google Flights\n",
"3. Select the dates that you want to travel\n",
"3. Click on the 'one way' option and click search\n",
"4. Search through all of the given options and select the cheapest flight\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a5bf2f5c",
"metadata": {},
"outputs": [],
"source": [
"reference_evaluation = REFERENCE_PROMPT.format(task=task, scoring=percentage, answer=reference_answer, response=ideal_response, examples=FEW_SHOT_REFERENCE_EXAMPLES)\n",
"rubric_evaluation = RUBRIC_PROMPT.format(task=task, scoring=percentage, answer=rubric_answer, response=ideal_response, examples=FEW_SHOT_REFERENCE_EXAMPLES)\n",
"question_evaluation = QUESTION_PROMPT.format(task=task, scoring=percentage, answer=question_answer, response=ideal_response, examples=FEW_SHOT_REFERENCE_EXAMPLES)"
]
},
{
"cell_type": "code",
"execution_count": 82,
"id": "1cd01a4c",
"metadata": {},
"outputs": [],
"source": [
"reference_response = llm_eval(reference_evaluation)\n",
"rubric_response = llm_eval(rubric_evaluation)\n",
"question_response = llm_eval(question_evaluation)"
]
},
{
"cell_type": "code",
"execution_count": 72,
"id": "72e6ac91",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"REFERENCE: 66.5 6.0 0.0\n",
"RUBRIC: 65.0 4.0 0.0\n",
"QUESTION: 87.0 6.5 0.0\n"
]
}
],
"source": [
"# response3 - the worst one\n",
"print('REFERENCE:', reference_response_percentage, reference_response_scale, reference_response_binary)\n",
"print('RUBRIC:', rubric_response_percentage, rubric_response_scale, rubric_response_binary)\n",
"print('QUESTION:', question_response_percentage, question_response_scale, question_response_binary)"
]
},
{
"cell_type": "code",
"execution_count": 83,
"id": "a66014ee",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"REFERENCE: 100.0 10.0 1.0\n",
"RUBRIC: 88.0 7.5 1.0\n",
"QUESTION: 87.0 7.5 1.0\n"
]
}
],
"source": [
"# reference_response - the best one\n",
"print('REFERENCE:', reference_response_ideal_percentage, reference_response_ideal_scale, reference_response_ideal_binary)\n",
"print('RUBRIC:', rubric_response_ideal_percentage, rubric_response_ideal_scale, rubric_response_ideal_binary)\n",
"print('QUESTION:', question_response_ideal_percentage, question_response_ideal_scale, question_response_ideal_binary)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
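The prompts above instruct the model to end its reply with nothing but a float, yet `llm_eval` returns the raw message content. A defensive helper for pulling the score out of the reply could look like this (a sketch, not part of the notebook):

```
import re


def parse_score(raw: str) -> float:
    """Extract the trailing float score from a judge response."""
    # The prompts ask for a bare float at the end; a regex tolerates
    # stray whitespace or leading prose.
    matches = re.findall(r"-?\d+(?:\.\d+)?", raw)
    if not matches:
        raise ValueError(f"no numeric score found in: {raw!r}")
    return float(matches[-1])


# e.g.: score = parse_score(llm_eval(reference_evaluation))
```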

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1,30 +0,0 @@
{
"1": {
"role": "assistant",
"content": "{\n \"thoughts\": {\n \"text\": \"I need to find Tesla's revenue in 2022.\",\n \"reasoning\": \"I will perform a web search to find the information.\",\n \"plan\": \"- Perform a web search\\n- Extract the revenue\\n- Write the revenue to a file\",\n \"criticism\": \"I should be cautious about the reliability of the source I find.\",\n \"speak\": \"I will search the web for Tesla's revenue in 2022.\"\n },\n \"command\": {\n \"name\": \"web_search\",\n \"args\": {\n \"query\": \"Tesla revenue in 2022\"\n }\n }\n}"
},
"2": {
"role": "assistant",
"content": "{\n \"thoughts\": {\n \"text\": \"I found a reliable source for Tesla's revenue in 2022.\",\n \"reasoning\": \"I will use the Statista source to extract the revenue information.\",\n \"plan\": \"- Browse the Statista website\\n- Extract the revenue\\n- Write the revenue to a file\",\n \"criticism\": \"I should verify the information on the Statista website.\",\n \"speak\": \"I will browse the Statista website to find Tesla's revenue in 2022.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": \"https://www.statista.com/statistics/272120/revenue-of-tesla/\",\n \"question\": \"What was Tesla's revenue in 2022?\"\n }\n }\n}"
},
"3": {
"role": "assistant",
"content": "Tesla's revenue in 2022 was nearly $81.5 billion, which represents a 51 percent increase from the previous year. The majority of the revenue, over $71 billion, was generated from Tesla's automotive segment. The Model Y emerged as Tesla's best-selling vehicle, and the company has been investing in the production of the Model 3 and Model Y."
},
"4": {
"role": "assistant",
"content": "The text provides various statistics and financial information about Tesla, including its revenue from 2008 to 2022. However, the specific revenue for 2022 is not mentioned in the text."
},
"5": {
"role": "assistant",
"content": "a description of different types of cookies used on a website and the options for accepting or rejecting them. There is no information provided about Tesla's revenue in 2022."
},
"6": {
"role": "assistant",
"content": "Tesla's revenue in 2022 increased by 51% to nearly $81.5 billion, with the majority coming from the automotive segment. The Model Y was the best-selling vehicle, and the company has been focusing on producing the Model 3 and Model Y."
},
"7": {
"role": "assistant",
"content": "{\n \"thoughts\": {\n \"text\": \"I found Tesla's revenue in 2022.\",\n \"reasoning\": \"I will write the revenue to a file.\",\n \"plan\": \"- Write the revenue to a file\",\n \"criticism\": \"I should ensure the revenue is formatted correctly.\",\n \"speak\": \"I will write Tesla's revenue in 2022 to a file.\"\n },\n \"command\": {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": \"tesla_revenue_2022.txt\",\n \"text\": \"Tesla's revenue in 2022 was $81,500,000,000.\"\n }\n }\n}"
}
}


@@ -1,74 +0,0 @@
{
"1": {
"role": "assistant",
"content": {
"thoughts": {
"text": "I need to find Tesla's revenue in 2022.",
"reasoning": "I will perform a web search to find the information.",
"plan": "- Perform a web search\n- Extract the revenue\n- Write the revenue to a file",
"criticism": "I should be cautious about the reliability of the source I find.",
"speak": "I will search the web for Tesla's revenue in 2022."
},
"command": {
"name": "web_search",
"args": {
"query": "Tesla revenue in 2022"
}
}
}
},
"2": {
"role": "assistant",
"content": {
"thoughts": {
"text": "I found a reliable source for Tesla's revenue in 2022.",
"reasoning": "I will use the Statista source to extract the revenue information.",
"plan": "- Browse the Statista website\n- Extract the revenue\n- Write the revenue to a file",
"criticism": "I should verify the information on the Statista website.",
"speak": "I will browse the Statista website to find Tesla's revenue in 2022."
},
"command": {
"name": "browse_website",
"args": {
"url": "https://www.statista.com/statistics/272120/revenue-of-tesla/",
"question": "What was Tesla's revenue in 2022?"
}
}
}
},
"3": {
"role": "assistant",
"content": "Tesla's revenue in 2022 was nearly $81.5 billion, which represents a 51 percent increase from the previous year. The majority of the revenue, over $71 billion, was generated from Tesla's automotive segment. The Model Y emerged as Tesla's best-selling vehicle, and the company has been investing in the production of the Model 3 and Model Y."
},
"4": {
"role": "assistant",
"content": "The text provides various statistics and financial information about Tesla, including its revenue from 2008 to 2022. However, the specific revenue for 2022 is not mentioned in the text."
},
"5": {
"role": "assistant",
"content": "a description of different types of cookies used on a website and the options for accepting or rejecting them. There is no information provided about Tesla's revenue in 2022."
},
"6": {
"role": "assistant",
"content": "Tesla's revenue in 2022 increased by 51% to nearly $81.5 billion, with the majority coming from the automotive segment. The Model Y was the best-selling vehicle, and the company has been focusing on producing the Model 3 and Model Y."
},
"7": {
"role": "assistant",
"content": {
"thoughts": {
"text": "I found Tesla's revenue in 2022.",
"reasoning": "I will write the revenue to a file.",
"plan": "- Write the revenue to a file",
"criticism": "I should ensure the revenue is formatted correctly.",
"speak": "I will write Tesla's revenue in 2022 to a file."
},
"command": {
"name": "write_to_file",
"args": {
"filename": "tesla_revenue_2022.txt",
"text": "Tesla's revenue in 2022 was $81,500,000,000."
}
}
}
}
}

cli.py

@@ -149,10 +149,11 @@ def start(agent_name: str, no_setup: bool):
setup_process.wait()
click.echo()
subprocess.Popen(["./run_benchmark", "serve"], cwd=agent_dir)
click.echo("⌛ (Re)starting benchmark server...")
wait_until_conn_ready(8080)
click.echo()
# FIXME: Doesn't work: Command not found: agbenchmark
# subprocess.Popen(["./run_benchmark", "serve"], cwd=agent_dir)
# click.echo("⌛ (Re)starting benchmark server...")
# wait_until_conn_ready(8080)
# click.echo()
subprocess.Popen(["./run"], cwd=agent_dir)
click.echo(f"⌛ (Re)starting agent '{agent_name}'...")


@@ -11,7 +11,7 @@ Configuration is controlled through the `Config` object. You can set configurati
- `BROWSE_CHUNK_MAX_LENGTH`: When browsing website, define the length of chunks to summarize. Default: 3000
- `BROWSE_SPACY_LANGUAGE_MODEL`: [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. Default: en_core_web_sm
- `CHAT_MESSAGES_ENABLED`: Enable chat messages. Optional
- `DISABLED_COMMANDS`: Commands to disable. Use comma separated names of commands. See the list of commands from built-in components [here](../components/components.md). Default: None
- `DISABLED_COMMANDS`: Commands to disable. Use comma separated names of commands. See the list of commands from built-in components [here](../../forge/components/built-in-components.md). Default: None
- `ELEVENLABS_API_KEY`: ElevenLabs API Key. Optional.
- `ELEVENLABS_VOICE_ID`: ElevenLabs Voice ID. Optional.
- `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: `text-embedding-3-small`


@@ -27,7 +27,7 @@ d88P 888 "Y88888 "Y888 "Y88P" "Y8888P88 888 888
888 "Y88P" 888 "Y88888 "Y8888
888
Y8b d88P
"Y88P" v0.1.0
"Y88P" v0.2.0
\n"""
if __name__ == "__main__":


@@ -0,0 +1,229 @@
import inspect
import logging
from typing import Any, Optional
from uuid import uuid4
from forge.agent.base import BaseAgent, BaseAgentSettings
from forge.agent.protocols import (
AfterExecute,
CommandProvider,
DirectiveProvider,
MessageProvider,
)
from forge.agent_protocol.agent import ProtocolAgent
from forge.agent_protocol.database.db import AgentDB
from forge.agent_protocol.models.task import (
Step,
StepRequestBody,
Task,
TaskRequestBody,
)
from forge.command.command import Command
from forge.components.system.system import SystemComponent
from forge.config.ai_profile import AIProfile
from forge.file_storage.base import FileStorage
from forge.llm.prompting.schema import ChatPrompt
from forge.llm.prompting.utils import dump_prompt
from forge.llm.providers.schema import AssistantFunctionCall
from forge.llm.providers.utils import function_specs_from_commands
from forge.models.action import (
ActionErrorResult,
ActionProposal,
ActionResult,
ActionSuccessResult,
)
from forge.utils.exceptions import AgentException, AgentTerminated
logger = logging.getLogger(__name__)
class ForgeAgent(ProtocolAgent, BaseAgent):
"""
The goal of the Forge is to take care of the boilerplate code,
so you can focus on agent design.
There is a great paper surveying the agent landscape: https://arxiv.org/abs/2308.11432,
which I would highly recommend reading as it will help you understand the possibilities.
ForgeAgent provides component support: https://docs.agpt.co/forge/components/introduction/
Using components is a new way of building agents that is more flexible and easier to extend.
Components replace some of the agent's logic and plugins with a more modular and composable system.
""" # noqa: E501
def __init__(self, database: AgentDB, workspace: FileStorage):
"""
The database is used to store tasks, steps and artifact metadata.
The workspace is used to store artifacts (files).
"""
# Example agent configuration; you can modify this to suit your needs
state = BaseAgentSettings(
name="Forge Agent",
description="The Forge Agent is a generic agent that can solve tasks.",
agent_id=str(uuid4()),
ai_profile=AIProfile(
ai_name="ForgeAgent", ai_role="Generic Agent", ai_goals=["Solve tasks"]
),
task="Solve tasks",
)
# ProtocolAgent adds the Agent Protocol (API) functionality
ProtocolAgent.__init__(self, database, workspace)
# BaseAgent provides the component handling functionality
BaseAgent.__init__(self, state)
# AGENT COMPONENTS
# Components provide additional functionality to the agent
# There are NO components added by default in the BaseAgent
# You can create your own components or add existing ones
# Built-in components:
# https://docs.agpt.co/forge/components/built-in-components/
# System component provides "finish" command and adds some prompt information
self.system = SystemComponent()
async def create_task(self, task_request: TaskRequestBody) -> Task:
"""
The agent protocol, which is the core of the Forge,
works by creating a task and then executing steps for that task.
This method is called when the agent is asked to create a task.
We are hooking into this function to add a custom log message,
though you can do anything you want here.
"""
task = await super().create_task(task_request)
logger.info(
f"📦 Task created with ID: {task.task_id} and "
f"input: {task.input[:40]}{'...' if len(task.input) > 40 else ''}"
)
return task
async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
"""
The preferred method to add agent logic is to add custom components:
https://docs.agpt.co/forge/components/creating-components/
Outdated tutorial on how to add custom logic:
https://aiedge.medium.com/autogpt-forge-e3de53cc58ec
The agent protocol, which is the core of the Forge, works by creating a task and then
executing steps for that task. This method is called when the agent is asked to execute
a step.
The task that is created contains an input string (for the benchmarks this is the task
the agent has been asked to solve) and additional input, which is a dictionary that
could contain anything.
If you want to get the task, use:
```
task = await self.db.get_task(task_id)
```
The step request body is essentially the same as the task request: it contains an input
string (for the benchmarks, the task the agent has been asked to solve) and additional
input, which is a dictionary that could contain anything.
You need to implement logic that takes in this step input and outputs the completed step
as a step object. You can do everything in a single step or break the work down into
multiple steps. By returning a request to continue in the step output, you let the user
decide whether they want the agent to continue.
""" # noqa: E501
step = await self.db.create_step(
task_id=task_id, input=step_request, is_last=False
)
proposal = await self.propose_action()
output = await self.execute(proposal)
if isinstance(output, ActionSuccessResult):
step.output = str(output.outputs)
elif isinstance(output, ActionErrorResult):
step.output = output.reason
return step
async def propose_action(self) -> ActionProposal:
self.reset_trace()
# Get directives
directives = self.state.directives.copy(deep=True)
directives.resources += await self.run_pipeline(DirectiveProvider.get_resources)
directives.constraints += await self.run_pipeline(
DirectiveProvider.get_constraints
)
directives.best_practices += await self.run_pipeline(
DirectiveProvider.get_best_practices
)
# Get commands
self.commands = await self.run_pipeline(CommandProvider.get_commands)
# Get messages
messages = await self.run_pipeline(MessageProvider.get_messages)
prompt: ChatPrompt = ChatPrompt(
messages=messages, functions=function_specs_from_commands(self.commands)
)
logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
# Call the LLM and parse result
# THIS NEEDS TO BE REPLACED WITH YOUR LLM CALL/LOGIC
# Have a look at autogpt/agents/agent.py for an example (complete_and_parse)
proposal = ActionProposal(
thoughts="I cannot solve the task!",
use_tool=AssistantFunctionCall(
name="finish", arguments={"reason": "Unimplemented logic"}
),
)
self.config.cycle_count += 1
return proposal
async def execute(self, proposal: Any, user_feedback: str = "") -> ActionResult:
tool = proposal.use_tool
# Get commands
self.commands = await self.run_pipeline(CommandProvider.get_commands)
# Execute the command
try:
command: Optional[Command] = None
for c in reversed(self.commands):
if tool.name in c.names:
command = c
if command is None:
raise AgentException(f"Command {tool.name} not found")
command_result = command(**tool.arguments)
if inspect.isawaitable(command_result):
command_result = await command_result
result = ActionSuccessResult(outputs=command_result)
except AgentTerminated:
result = ActionSuccessResult(outputs="Agent terminated or finished")
except AgentException as e:
result = ActionErrorResult.from_exception(e)
logger.warning(f"{tool} raised an error: {e}")
await self.run_pipeline(AfterExecute.after_execute, result)
logger.debug("\n".join(self.trace))
return result
async def do_not_execute(
self, denied_proposal: Any, user_feedback: str
) -> ActionResult:
result = ActionErrorResult(reason="Action denied")
await self.run_pipeline(AfterExecute.after_execute, result)
logger.debug("\n".join(self.trace))
return result
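For illustration, a minimal sketch of a custom component, assuming the `DirectiveProvider` protocol imported above works the way its use in `propose_action` suggests (`get_resources` contributing strings to the agent's resource directives); the class name and resource string are hypothetical:

```
from typing import Iterator

from forge.agent.protocols import DirectiveProvider


class WeatherInfoComponent(DirectiveProvider):
    """Hypothetical component advertising an extra resource to the agent."""

    def get_resources(self) -> Iterator[str]:
        # Strings yielded here would be collected by run_pipeline() in
        # propose_action() and appended to directives.resources.
        yield "Access to current weather data for any city."


# It could then be registered in ForgeAgent.__init__ next to SystemComponent:
#     self.weather_info = WeatherInfoComponent()
```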


@@ -28,7 +28,7 @@ from forge.file_storage.base import FileStorage
logger = logging.getLogger(__name__)
class Agent:
class ProtocolAgent:
def __init__(self, database: AgentDB, workspace: FileStorage):
self.db = database
self.workspace = workspace


@@ -3,17 +3,12 @@ from pathlib import Path
import pytest
from fastapi import UploadFile
from forge.agent_protocol.database.db import AgentDB
from forge.agent_protocol.models.task import (
StepRequestBody,
Task,
TaskListResponse,
TaskRequestBody,
)
from forge.file_storage.base import FileStorageConfiguration
from forge.file_storage.local import LocalFileStorage
from .agent import Agent
from .agent import ProtocolAgent
from .database.db import AgentDB
from .models.task import StepRequestBody, Task, TaskListResponse, TaskRequestBody
@pytest.fixture
@@ -21,7 +16,7 @@ def agent(test_workspace: Path):
db = AgentDB("sqlite:///test.db")
config = FileStorageConfiguration(root=test_workspace)
workspace = LocalFileStorage(config)
return Agent(db, workspace)
return ProtocolAgent(db, workspace)
@pytest.fixture
@@ -33,7 +28,7 @@ def file_upload():
@pytest.mark.asyncio
async def test_create_task(agent: Agent):
async def test_create_task(agent: ProtocolAgent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
@@ -42,7 +37,7 @@ async def test_create_task(agent: Agent):
@pytest.mark.asyncio
async def test_list_tasks(agent: Agent):
async def test_list_tasks(agent: ProtocolAgent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
@@ -52,7 +47,7 @@ async def test_list_tasks(agent: Agent):
@pytest.mark.asyncio
async def test_get_task(agent: Agent):
async def test_get_task(agent: ProtocolAgent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
@@ -63,7 +58,7 @@ async def test_get_task(agent: Agent):
@pytest.mark.xfail(reason="execute_step is not implemented")
@pytest.mark.asyncio
async def test_execute_step(agent: Agent):
async def test_execute_step(agent: ProtocolAgent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
@@ -78,7 +73,7 @@ async def test_execute_step(agent: Agent):
@pytest.mark.xfail(reason="execute_step is not implemented")
@pytest.mark.asyncio
async def test_get_step(agent: Agent):
async def test_get_step(agent: ProtocolAgent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
@@ -92,7 +87,7 @@ async def test_get_step(agent: Agent):
@pytest.mark.asyncio
async def test_list_artifacts(agent: Agent):
async def test_list_artifacts(agent: ProtocolAgent):
tasks = await agent.list_tasks()
assert tasks.tasks, "No tasks in test.db"
@@ -101,7 +96,7 @@ async def test_list_artifacts(agent: Agent):
@pytest.mark.asyncio
async def test_create_artifact(agent: Agent, file_upload: UploadFile):
async def test_create_artifact(agent: ProtocolAgent, file_upload: UploadFile):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
@@ -116,7 +111,7 @@ async def test_create_artifact(agent: Agent, file_upload: UploadFile):
@pytest.mark.asyncio
async def test_create_and_get_artifact(agent: Agent, file_upload: UploadFile):
async def test_create_and_get_artifact(agent: ProtocolAgent, file_upload: UploadFile):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)


@@ -24,7 +24,7 @@ from .models import (
)
if TYPE_CHECKING:
from forge.agent.agent import Agent
from .agent import ProtocolAgent
base_router = APIRouter()
logger = logging.getLogger(__name__)
@@ -73,7 +73,7 @@ async def create_agent_task(request: Request, task_request: TaskRequestBody) ->
"artifacts": [],
}
"""
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
task = await agent.create_task(task_request)
@@ -124,7 +124,7 @@ async def list_agent_tasks(
}
}
"""
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
tasks = await agent.list_tasks(page, page_size)
return tasks
@@ -185,7 +185,7 @@ async def get_agent_task(request: Request, task_id: str) -> Task:
]
}
""" # noqa: E501
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
task = await agent.get_task(task_id)
return task
@@ -239,7 +239,7 @@ async def list_agent_task_steps(
}
}
""" # noqa: E501
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
steps = await agent.list_steps(task_id, page, page_size)
return steps
@@ -298,7 +298,7 @@ async def execute_agent_task_step(
...
}
"""
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
# An empty step request represents a yes to continue command
if not step_request:
@@ -337,7 +337,7 @@ async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> S
...
}
"""
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
step = await agent.get_step(task_id, step_id)
return step
@@ -388,7 +388,7 @@ async def list_agent_task_artifacts(
}
}
""" # noqa: E501
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
artifacts = await agent.list_artifacts(task_id, page, page_size)
return artifacts
@@ -430,7 +430,7 @@ async def upload_agent_task_artifacts(
"file_name": "main.py"
}
""" # noqa: E501
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
if file is None:
raise HTTPException(status_code=400, detail="File must be specified")
@@ -468,7 +468,7 @@ async def download_agent_task_artifact(
Response:
<file_content_of_artifact>
"""
agent: "Agent" = request["agent"]
agent: "ProtocolAgent" = request["agent"]
try:
return await agent.get_artifact(task_id, artifact_id)
except Exception:

forge/forge/app.py (new file)

@@ -0,0 +1,13 @@
import os
from pathlib import Path
from forge.agent.forge_agent import ForgeAgent
from forge.agent_protocol.database.db import AgentDB
from forge.file_storage import FileStorageBackendName, get_storage
database_name = os.getenv("DATABASE_STRING")
workspace = get_storage(FileStorageBackendName.LOCAL, root_path=Path("workspace"))
database = AgentDB(database_name, debug_enabled=False)
agent = ForgeAgent(database=database, workspace=workspace)
app = agent.get_agent_app()
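Since `get_agent_app()` builds on the FastAPI-based agent protocol routes shown earlier, the resulting `app` object should be servable with any ASGI server. A hedged sketch using uvicorn (host and port are arbitrary choices; importing `forge.app` runs the module-level setup above, so `DATABASE_STRING` must be set):

```
import uvicorn

from forge.app import app  # the module defined above


if __name__ == "__main__":
    # Serve the agent protocol API; host and port are illustrative.
    uvicorn.run(app, host="127.0.0.1", port=8000)
```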


@@ -1,6 +1,6 @@
[tool.poetry]
name = "AutoGPT-Forge"
version = "0.1.0"
version = "0.2.0"
description = ""
authors = ["AutoGPT <support@agpt.co>"]
license = "MIT"


@@ -464,7 +464,7 @@ d88P 888 "Y88888 "Y888 "Y88P" "Y8888P88 888 888
888 "Y88P" 888 "Y88888 "Y8888
888
Y8b d88P
"Y88P" v0.1.0
"Y88P" v0.2.0
[2023-09-27 15:39:07,832] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://localhost:8000


@@ -1,22 +1,27 @@
from multiprocessing import freeze_support
from multiprocessing.spawn import freeze_support as freeze_support_spawn
from autogpt_server.data import ExecutionQueue
from autogpt_server.data.execution import ExecutionQueue
from autogpt_server.executor import start_executor_manager
from autogpt_server.server import start_server
def main() -> None:
queue = ExecutionQueue()
start_executor_manager(5, queue)
start_server(queue)
if __name__ == "__main__":
def background_process() -> None:
"""
Used by the CLI to run the server and executor in the background.
This function starts the executor manager and then the server.
"""
# These directives are required to make multiprocessing work with cx_Freeze.
# They are both required and safe across platforms (Windows, macOS, Linux),
# and must be placed at the beginning of execution, before any other
# multiprocessing code is run.
freeze_support()
freeze_support_spawn()
main()
# Start the application
queue = ExecutionQueue()
start_executor_manager(5, queue)
start_server(queue)
if __name__ == "__main__":
background_process()
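For context, a standalone sketch of the freeze_support pattern the comment above describes, using only the standard library (the calls are no-ops when the program is not frozen):

```
from multiprocessing import Process, freeze_support


def worker() -> None:
    print("executor worker running")


if __name__ == "__main__":
    # Must run before any other multiprocessing code when the app is frozen
    # (e.g. by cx_Freeze); harmless in a normal Python run.
    freeze_support()
    p = Process(target=worker)
    p.start()
    p.join()
```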


@@ -0,0 +1,116 @@
"""
The command line interface for the agent server
"""
from multiprocessing import freeze_support
from multiprocessing.spawn import freeze_support as freeze_support_spawn
import click
@click.group()
def main():
"""AutoGPT Server CLI Tool"""
@main.command()
def background() -> None:
"""
Command to run the server in the background. Used by the `start` command.
"""
from autogpt_server.app import background_process
background_process()
@main.command()
def start():
"""
Starts the server in the background and saves the PID
"""
import os
import pathlib
import subprocess
import psutil
# Define the path for the new directory and file
home_dir = pathlib.Path.home()
new_dir = home_dir / ".config" / "agpt"
file_path = new_dir / "running.tmp"
# Create the directory if it does not exist
os.makedirs(new_dir, exist_ok=True)
if file_path.exists():
with open(file_path, "r", encoding="utf-8") as file:
pid = int(file.read())
if psutil.pid_exists(pid):
print("Server is already running")
exit(1)
else:
print("PID does not exist deleting file")
os.remove(file_path)
sp = subprocess.Popen(
["poetry", "run", "python", "autogpt_server/cli.py", "background"],
stdout=subprocess.DEVNULL, # Redirect standard output to devnull
stderr=subprocess.DEVNULL, # Redirect standard error to devnull
)
print(f"Server running in process: {sp.pid}")
with open(file_path, "w", encoding="utf-8") as file:
file.write(str(sp.pid))
@main.command()
def stop():
"""
Stops the server
"""
import os
import pathlib
import subprocess
home_dir = pathlib.Path.home()
new_dir = home_dir / ".config" / "agpt"
file_path = new_dir / "running.tmp"
if not file_path.exists():
print("Server is not running")
return
with open(file_path, "r", encoding="utf-8") as file:
pid = file.read()
os.remove(file_path)
subprocess.Popen(["kill", pid])
print("Server Stopped")
@click.group()
def test():
"""
Group for test commands
"""
@test.command()
def event():
"""
Send an event to the running server
"""
print("Event sent")
main.add_command(test)
def start_cli() -> None:
"""
Entry point into the cli
"""
freeze_support()
freeze_support_spawn()
main()
if __name__ == "__main__":
start_cli()
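A quick way to smoke-test the CLI above in-process is click's test runner; a sketch assuming the module path `autogpt_server.cli` from this diff:

```
from click.testing import CliRunner

from autogpt_server.cli import main

runner = CliRunner()
# Invoke the nested "test event" command registered via main.add_command(test).
result = runner.invoke(main, ["test", "event"])
assert result.output.strip() == "Event sent"
```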


@@ -1,36 +0,0 @@
import uuid
from multiprocessing import Queue
class Execution:
"""Data model for an execution of an Agent"""
def __init__(self, execution_id: str, data: str):
self.execution_id = execution_id
self.data = data
# TODO: This shared class makes the API & executor coupled in one machine.
# Replace this with a persistent & remote-hosted queue.
# One very likely candidate would be persisted Redis (Redis Queue).
# It will also open the possibility of using it for other purposes like
# caching, execution engine broker (like Celery), user session management etc.
class ExecutionQueue:
"""
Queue for managing the execution of agents.
This will be shared between different processes
"""
def __init__(self):
self.queue: Queue[Execution] = Queue()
def add(self, data: str) -> str:
execution_id = uuid.uuid4()
self.queue.put(Execution(str(execution_id), data))
return str(execution_id)
def get(self) -> Execution | None:
return self.queue.get()
def empty(self) -> bool:
return self.queue.empty()


@@ -0,0 +1,237 @@
import json
import jsonschema
from abc import ABC, abstractmethod
from prisma.models import AgentBlock
from pydantic import BaseModel
from typing import Any, ClassVar
BlockData = dict[str, Any]
class BlockSchema(BaseModel):
"""
A schema for the block input and output data.
The dictionary structure is an object-typed `jsonschema`.
The top-level properties are the block input/output names.
You can initialize this class by providing a dictionary of properties.
The key is the string of the property name, and the value is either
a string of the type or a dictionary of the jsonschema.
You can also provide additional keyword arguments for additional properties,
such as `name` or `required` (by default, all properties are required).
Example:
input_schema = BlockSchema({
"system_prompt": "string",
"user_prompt": "string",
"max_tokens": "integer",
"user_info": {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"},
},
"required": ["name"],
},
}, required=["system_prompt", "user_prompt"])
output_schema = BlockSchema({
"on_complete": "string",
"on_failures": "string",
})
"""
jsonschema: dict[str, Any]
def __init__(
self,
properties: dict[str, str | dict],
required: list[str] | None = None,
**kwargs: Any
):
schema = {
"type": "object",
"properties": {
key: {"type": value} if isinstance(value, str) else value
for key, value in properties.items()
},
"required": required or list(properties.keys()),
**kwargs,
}
super().__init__(jsonschema=schema)
def __str__(self) -> str:
return json.dumps(self.jsonschema)
def validate_data(self, data: BlockData) -> str | None:
"""
Validate the data against the schema.
Returns the validation error message if the data does not match the schema.
"""
try:
jsonschema.validate(data, self.jsonschema)
return None
except jsonschema.ValidationError as e:
return str(e)
def validate_field(self, field_name: str, data: BlockData) -> str | None:
"""
Validate the data against a specific property (one of the input/output name).
Returns the validation error message if the data does not match the schema.
"""
property_schema = self.jsonschema["properties"].get(field_name)
if not property_schema:
return f"Invalid property name {field_name}"
try:
jsonschema.validate(data, property_schema)
return None
except jsonschema.ValidationError as e:
return str(e)
class Block(ABC, BaseModel):
@classmethod
@property
@abstractmethod
def id(cls) -> str:
"""
The unique identifier for the block; this value will be persisted in the DB,
so it should be unique and constant across application runs.
Use the UUID format for the ID.
"""
pass
@classmethod
@property
@abstractmethod
def input_schema(cls) -> BlockSchema:
"""
The schema for the block input data.
The top-level properties are the possible input names expected by the block.
"""
pass
@classmethod
@property
@abstractmethod
def output_schema(cls) -> BlockSchema:
"""
The schema for the block output.
The top-level properties are the possible output names produced by the block.
"""
pass
@abstractmethod
async def run(self, input_data: BlockData) -> tuple[str, Any]:
"""
Run the block with the given input data.
Args:
input_data: The input data with the structure of input_schema.
Returns:
The (output name, output data), matching the type in output_schema.
"""
pass
@classmethod
@property
def name(cls):
return cls.__name__
async def execute(self, input_data: BlockData) -> tuple[str, Any]:
if error := self.input_schema.validate_data(input_data):
raise ValueError(
f"Unable to execute block with invalid input data: {error}"
)
output_name, output_data = await self.run(input_data)
if error := self.output_schema.validate_field(output_name, output_data):
raise ValueError(
f"Unable to execute block with invalid output data: {error}"
)
return output_name, output_data
# ===================== Inline-Block Implementations ===================== #
class ParrotBlock(Block):
id: ClassVar[str] = "1ff065e9-88e8-4358-9d82-8dc91f622ba9" # type: ignore
input_schema: ClassVar[BlockSchema] = BlockSchema({ # type: ignore
"input": "string",
})
output_schema: ClassVar[BlockSchema] = BlockSchema({ # type: ignore
"output": "string",
})
async def run(self, input_data: BlockData) -> tuple[str, Any]:
return "output", input_data["input"]
class TextCombinerBlock(Block):
id: ClassVar[str] = "db7d8f02-2f44-4c55-ab7a-eae0941f0c30" # type: ignore
input_schema: ClassVar[BlockSchema] = BlockSchema({ # type: ignore
"text1": "string",
"text2": "string",
"format": "string",
})
output_schema: ClassVar[BlockSchema] = BlockSchema({ # type: ignore
"combined_text": "string",
})
async def run(self, input_data: BlockData) -> tuple[str, Any]:
return "combined_text", input_data["format"].format(
text1=input_data["text1"],
text2=input_data["text2"],
)
class PrintingBlock(Block):
id: ClassVar[str] = "f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c" # type: ignore
input_schema: ClassVar[BlockSchema] = BlockSchema({ # type: ignore
"text": "string",
})
output_schema: ClassVar[BlockSchema] = BlockSchema({ # type: ignore
"status": "string",
})
async def run(self, input_data: BlockData) -> tuple[str, Any]:
print(input_data["text"])
return "status", "printed"
# ======================= Block Helper Functions ======================= #
AVAILABLE_BLOCKS: dict[str, Block] = {}
async def initialize_blocks() -> None:
global AVAILABLE_BLOCKS
AVAILABLE_BLOCKS = {block.id: block() for block in Block.__subclasses__()}
for block in AVAILABLE_BLOCKS.values():
existing_block = await AgentBlock.prisma().find_unique(
where={"id": block.id}
)
if existing_block:
continue
await AgentBlock.prisma().create(
data={
"id": block.id,
"name": block.name,
"inputSchema": str(block.input_schema),
"outputSchema": str(block.output_schema),
}
)
async def get_block(block_id: str) -> Block:
if not AVAILABLE_BLOCKS:
await initialize_blocks()
return AVAILABLE_BLOCKS[block_id]
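# --- Editor's sketch (illustrative, not part of the commit) ---
# Lazy lookup: the first get_block() call seeds AVAILABLE_BLOCKS and the
# AgentBlock table, so a connected Prisma client is assumed here.
async def _demo_get_block() -> None:
    parrot = await get_block("1ff065e9-88e8-4358-9d82-8dc91f622ba9")  # ParrotBlock.id
    name, data = await parrot.execute({"input": "echo"})
    assert (name, data) == ("output", "echo")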

View File

@@ -0,0 +1,26 @@
import asyncio
from uuid import uuid4
from prisma import Prisma
from pydantic import BaseModel
prisma = Prisma(auto_register=True)
def connect_sync():
asyncio.get_event_loop().run_until_complete(connect())
async def connect():
await prisma.connect()
async def disconnect():
await prisma.disconnect()
class BaseDbModel(BaseModel):
id: str = ""
def __init__(self, id: str = "", **data):
data["id"] = id or str(uuid4())
super().__init__(**data)

View File

@@ -0,0 +1,98 @@
import json
from datetime import datetime
from enum import Enum
from multiprocessing import Queue
from prisma.models import AgentNodeExecution
from typing import Any
from autogpt_server.data.db import BaseDbModel
class Execution(BaseDbModel):
"""Data model for an execution of an Agent"""
run_id: str
node_id: str
data: dict[str, Any]
class ExecutionStatus(str, Enum):
QUEUED = "QUEUED"
RUNNING = "RUNNING"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
# TODO: This shared class couples the API & executor to one machine.
# Replace this with a persistent & remote-hosted queue.
# One very likely candidate would be persistent Redis (Redis Queue).
# It would also open the possibility of using it for other purposes like
# caching, an execution-engine broker (like Celery), user session management, etc.
class ExecutionQueue:
"""
Queue for managing the execution of agents.
This will be shared between different processes.
"""
def __init__(self):
self.queue: Queue[Execution] = Queue()
def add(self, execution: Execution) -> Execution:
self.queue.put(execution)
return execution
def get(self) -> Execution:
return self.queue.get()
def empty(self) -> bool:
return self.queue.empty()
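# --- Editor's sketch (illustrative, not part of the commit) ---
# The intended handshake, shown single-process; in the real system the
# producer (API server) and consumer (executor) live in separate processes.
# Field values below are made up.
def _demo_queue() -> None:
    queue = ExecutionQueue()
    queue.add(Execution(run_id="run-1", node_id="node-1", data={"input": "hi"}))
    assert not queue.empty()
    item = queue.get()  # blocks until an item is available
    assert item.node_id == "node-1"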
async def add_execution(execution: Execution, queue: ExecutionQueue) -> Execution:
await AgentNodeExecution.prisma().create(
data={
"id": execution.id,
"executionId": execution.run_id,
"agentNodeId": execution.node_id,
"executionStatus": ExecutionStatus.QUEUED,
"inputData": json.dumps(execution.data),
"creationTime": datetime.now(),
}
)
return queue.add(execution)
async def start_execution(exec_id: str) -> None:
await AgentNodeExecution.prisma().update(
where={"id": exec_id},
data={
"executionStatus": ExecutionStatus.RUNNING,
"startTime": datetime.now(),
},
)
async def complete_execution(exec_id: str, output: tuple[str, Any]) -> None:
output_name, output_data = output
await AgentNodeExecution.prisma().update(
where={"id": exec_id},
data={
"executionStatus": ExecutionStatus.COMPLETED,
"outputName": output_name,
"outputData": json.dumps(output_data),
"endTime": datetime.now(),
},
)
async def fail_execution(exec_id: str, error: Exception) -> None:
await AgentNodeExecution.prisma().update(
where={"id": exec_id},
data={
"executionStatus": ExecutionStatus.FAILED,
"outputName": "error",
"outputData": str(error),
"endTime": datetime.now(),
},
)
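# --- Editor's sketch (illustrative, not part of the commit) ---
# The four helpers above drive one execution through its lifecycle;
# a connected Prisma client and an ExecutionQueue are assumed.
async def _demo_lifecycle(queue: ExecutionQueue) -> None:
    execution = Execution(run_id="run-1", node_id="node-1", data={"text": "hi"})
    await add_execution(execution, queue)                          # -> QUEUED
    await start_execution(execution.id)                            # -> RUNNING
    await complete_execution(execution.id, ("status", "printed"))  # -> COMPLETED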

View File

@@ -0,0 +1,172 @@
import asyncio
import json
import uuid
from typing import Any
from prisma.models import AgentGraph, AgentNode, AgentNodeLink, AgentNodeExecution
from autogpt_server.data.db import BaseDbModel
from autogpt_server.data.block import get_block
class Node(BaseDbModel):
block_id: str
input_default: dict[str, Any] = {} # dict[input_name, default_value]
input_nodes: dict[str, str] = {} # dict[input_name, node_id]
# TODO: Make it `dict[str, list[str]]`; an output can be connected to multiple blocks.
# Another option is to use an edge list, but that would complicate the rest of the code.
output_nodes: dict[str, str] = {} # dict[output_name, node_id]
@staticmethod
def from_db(node: AgentNode):
if not node.AgentBlock:
raise ValueError(f"Invalid node {node.id}, invalid AgentBlock.")
return Node(
id=node.id,
block_id=node.AgentBlock.id,
input_default=json.loads(node.constantInput),
input_nodes={v.sinkName: v.agentNodeSourceId for v in node.Input or []},
output_nodes={v.sourceName: v.agentNodeSinkId for v in node.Output or []},
)
def connect(self, node: "Node", source_name: str, sink_name: str):
self.output_nodes[source_name] = node.id
node.input_nodes[sink_name] = self.id
@property
async def block(self):
return await get_block(self.block_id)
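# --- Editor's sketch (illustrative, not part of the commit) ---
# connect() wires a source output port to a sink input port; the block IDs
# below are hypothetical placeholders.
def _demo_connect() -> None:
    source = Node(block_id="block-a")
    sink = Node(block_id="block-b")
    source.connect(sink, source_name="output", sink_name="text")
    assert source.output_nodes["output"] == sink.id
    assert sink.input_nodes["text"] == source.id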
class Graph(BaseDbModel):
name: str
description: str
nodes: list[Node]
@property
def starting_nodes(self) -> list[Node]:
return [node for node in self.nodes if not node.input_nodes]
@staticmethod
def from_db(graph: AgentGraph):
return Graph(
id=graph.id,
name=graph.name or "",
description=graph.description or "",
nodes=[Node.from_db(node) for node in graph.AgentNodes or []],
)
EXECUTION_NODE_INCLUDE = {
"Input": True,
"Output": True,
"AgentBlock": True,
}
async def get_node(node_id: str) -> Node | None:
node = await AgentNode.prisma().find_unique_or_raise(
where={"id": node_id},
include=EXECUTION_NODE_INCLUDE, # type: ignore
)
return Node.from_db(node) if node else None
async def get_graph(graph_id: str) -> Graph | None:
graph = await AgentGraph.prisma().find_unique(
where={"id": graph_id},
include={"AgentNodes": {"include": EXECUTION_NODE_INCLUDE}}, # type: ignore
)
return Graph.from_db(graph) if graph else None
async def get_node_input(node: Node, exec_id: str) -> dict[str, Any]:
"""
Get execution node input data from the previous node execution result.
Args:
node: The execution node.
exec_id: The execution ID.
Returns:
A dictionary of input data, where each key is an input name and each value is the corresponding input data.
"""
query = AgentNodeExecution.prisma().find_many(
where={ # type: ignore
"executionId": exec_id,
"agentNodeId": {"in": list(node.input_nodes.values())},
"executionStatus": "COMPLETED",
},
distinct=["agentNodeId"], # type: ignore
order={"creationTime": "desc"},
)
latest_executions: dict[str, AgentNodeExecution] = {
execution.agentNodeId: execution for execution in await query
}
return {
**node.input_default,
**{
name: json.loads(latest_executions[node_id].outputData or "{}")
for name, node_id in node.input_nodes.items()
if node_id in latest_executions and latest_executions[node_id].outputData
},
}
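# --- Editor's sketch (illustrative, not part of the commit) ---
# The returned dict is a plain merge: completed upstream outputs override
# the node's defaults, and inputs with no completed upstream stay absent.
def _demo_input_merge() -> None:
    input_default = {"format": "{text1},{text2}"}
    latest_outputs = {"text1": "hello"}  # only one upstream node COMPLETED
    merged = {**input_default, **latest_outputs}
    assert merged == {"format": "{text1},{text2}", "text1": "hello"}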
async def create_graph(graph: Graph) -> Graph:
await AgentGraph.prisma().create(
data={
"id": graph.id,
"name": graph.name,
"description": graph.description,
}
)
# TODO: replace with bulk creation using create_many
await asyncio.gather(
*[
AgentNode.prisma().create(
{
"id": node.id,
"agentBlockId": node.block_id,
"agentGraphId": graph.id,
"constantInput": json.dumps(node.input_default),
}
)
for node in graph.nodes
]
)
edge_source_names = {
(source_node.id, sink_node_id): output_name
for source_node in graph.nodes
for output_name, sink_node_id in source_node.output_nodes.items()
}
edge_sink_names = {
(source_node_id, sink_node.id): input_name
for sink_node in graph.nodes
for input_name, source_node_id in sink_node.input_nodes.items()
}
# TODO: replace with bulk creation using create_many
await asyncio.gather(
*[
AgentNodeLink.prisma().create(
{
"id": str(uuid.uuid4()),
"sourceName": edge_source_names.get((input_node, output_node), ""),
"sinkName": edge_sink_names.get((input_node, output_node), ""),
"agentNodeSourceId": input_node,
"agentNodeSinkId": output_node,
}
)
for input_node, output_node in (
edge_source_names.keys() | edge_sink_names.keys()
)
]
)
if created_graph := await get_graph(graph.id):
return created_graph
raise ValueError(f"Failed to create graph {graph.id}.")

View File

@@ -1,36 +1,136 @@
import asyncio
import logging
import time
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Process
from typing import Optional
from autogpt_server.data import Execution, ExecutionQueue
from autogpt_server.data import block, db, graph
from autogpt_server.data.execution import (
Execution,
ExecutionQueue,
add_execution,
complete_execution,
fail_execution,
start_execution,
)
logger = logging.getLogger(__name__)
# TODO: Replace this by an actual Agent Execution.
def execute_node(id: str, data: str) -> None:
logger.warning(f"Executor processing started, execution_id: {id}, data: {data}")
for i in range(5):
def get_log_prefix(run_id: str, exec_id: str, block_name: str = "-"):
return f"[Execution graph-{run_id}|node-{exec_id}|{block_name}]"
async def execute_node(data: Execution) -> Execution | None:
"""
Execute a node in the graph. This will trigger a block execution on a node,
persist the execution result, and return the subsequent node to be executed.
Args:
data: The execution data for executing the current node.
Returns:
The subsequent node to be enqueued, or None if there is no subsequent node.
"""
run_id = data.run_id
exec_id = data.id
exec_data = data.data
node_id = data.node_id
node = await graph.get_node(node_id)
if not node:
logger.error(f"Node {node_id} not found.")
return None
node_block = await block.get_block(node.block_id)
if not node_block:
logger.error(f"Block {node.block_id} not found.")
return None
# Execute the node
prefix = get_log_prefix(run_id, exec_id, node_block.name)
logger.warning(f"{prefix} execute with input:\n{exec_data}")
await start_execution(exec_id)
try:
output_name, output_data = await node_block.execute(exec_data)
logger.warning(f"{prefix} executed with output: `{output_name}`:{output_data}")
await complete_execution(exec_id, (output_name, output_data))
except Exception as e:
logger.exception(f"{prefix} failed with error: %s", e)
await fail_execution(exec_id, e)
raise e
# Try to enqueue next eligible nodes
if output_name not in node.output_nodes:
logger.error(f"{prefix} output name `{output_name}` has no subsequent node.")
return None
next_node_id = node.output_nodes[output_name]
next_node = await graph.get_node(next_node_id)
if not next_node:
logger.error(f"{prefix} Error, next node {next_node_id} not found.")
return None
next_node_input = await graph.get_node_input(next_node, run_id)
next_node_block = await next_node.block
if not set(next_node.input_nodes).issubset(next_node_input):
logger.warning(f"{prefix} Skipped {next_node_id}-{next_node_block.name}, "
f"missing: {set(next_node.input_nodes) - set(next_node_input)}")
return None
if error := next_node_block.input_schema.validate_data(next_node_input):
logger.warning(
f"Executor processing step {i}, execution_id: {id}, data: {data}"
)
time.sleep(1)
logger.warning(f"Executor processing completed, execution_id: {id}, data: {data}")
f"{prefix} Skipped {next_node_id}-{next_node_block.name}, {error}")
return None
logger.warning(f"{prefix} Enqueue next node {next_node_id}-{next_node_block.name}")
return Execution(
run_id=run_id, node_id=next_node_id, data=next_node_input
)
def execute_node_sync(data: Execution) -> Execution | None:
"""
A synchronous version of `execute_node`, to be used in the ProcessPoolExecutor.
"""
prefix = get_log_prefix(data.run_id, data.id)
try:
logger.warning(f"{prefix} Start execution")
loop = asyncio.get_event_loop()
return loop.run_until_complete(execute_node(data))
except Exception as e:
logger.error(f"{prefix} Error: {e}")
def start_executor(pool_size: int, queue: ExecutionQueue) -> None:
with ThreadPoolExecutor(max_workers=pool_size) as executor:
loop = asyncio.get_event_loop()
loop.run_until_complete(db.connect())
loop.run_until_complete(block.initialize_blocks())
def on_complete_execution(f: asyncio.Future[Execution | None]):
exception = f.exception()
if exception:
logger.exception("Error during execution!! %s", exception)
return exception
execution = f.result()
if execution:
loop.run_until_complete(add_execution(execution, queue))
return exception
return None
logger.warning("Executor started!")
with ProcessPoolExecutor(
max_workers=pool_size,
initializer=db.connect_sync,
) as executor:
while True:
execution: Execution | None = queue.get()
if not execution:
time.sleep(1)
continue
executor.submit(
execute_node,
execution.execution_id,
execution.data,
) # type: ignore
future = executor.submit(execute_node_sync, queue.get())
future.add_done_callback(on_complete_execution) # type: ignore
def start_executor_manager(pool_size: int, queue: ExecutionQueue) -> None:

View File

@@ -1,11 +1,14 @@
import uvicorn
from fastapi import APIRouter, FastAPI
import asyncio
import uuid
from autogpt_server.data import ExecutionQueue
import uvicorn
from fastapi import APIRouter, FastAPI, HTTPException
from autogpt_server.data import db, execution, graph
class AgentServer:
def __init__(self, queue: ExecutionQueue):
def __init__(self, queue: execution.ExecutionQueue):
self.app = FastAPI(
title="AutoGPT Agent Server",
description=(
@@ -25,14 +28,38 @@ class AgentServer:
methods=["POST"],
)
self.app.include_router(self.router)
self.app.on_event("startup")(db.connect)
self.app.on_event("shutdown")(db.disconnect)
def execute_agent(self, agent_id: str):
execution_id = self.execution_queue.add(agent_id)
return {"execution_id": execution_id, "agent_id": agent_id}
async def execute_agent(self, agent_id: str, node_input: dict):
agent = await graph.get_graph(agent_id)
if not agent:
raise HTTPException(status_code=404, detail=f"Agent #{agent_id} not found.")
run_id = str(uuid.uuid4())
tasks = []
# Currently, there is no constraint on the number of root nodes in the graph.
for node in agent.starting_nodes:
block = await node.block
if error := block.input_schema.validate_data(node_input):
raise HTTPException(
status_code=400,
detail=f"Input data doesn't match {block.name} input: {error}",
)
task = execution.add_execution(
execution.Execution(
run_id=run_id, node_id=node.id, data=node_input
),
self.execution_queue,
)
tasks.append(task)
return await asyncio.gather(*tasks)
def start_server(queue: ExecutionQueue, use_uvicorn: bool = True):
app = AgentServer(queue).app
if use_uvicorn:
uvicorn.run(app)
return app
def start_server(queue: execution.ExecutionQueue):
agent_server = AgentServer(queue)
uvicorn.run(agent_server.app)

View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -33,6 +33,25 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (>=0.23)"]
[[package]]
name = "attrs"
version = "23.2.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
{file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
{file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
]
[package.extras]
cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
dev = ["attrs[tests]", "pre-commit"]
docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
tests = ["attrs[tests-no-zope]", "zope-interface"]
tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]
[[package]]
name = "certifi"
version = "2024.6.2"
@@ -211,6 +230,22 @@ docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1
testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"]
typing = ["typing-extensions (>=4.8)"]
[[package]]
name = "flake8"
version = "7.0.0"
description = "the modular source code checker: pep8 pyflakes and co"
optional = false
python-versions = ">=3.8.1"
files = [
{file = "flake8-7.0.0-py2.py3-none-any.whl", hash = "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"},
{file = "flake8-7.0.0.tar.gz", hash = "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132"},
]
[package.dependencies]
mccabe = ">=0.7.0,<0.8.0"
pycodestyle = ">=2.11.0,<2.12.0"
pyflakes = ">=3.2.0,<3.3.0"
[[package]]
name = "h11"
version = "0.14.0"
@@ -354,6 +389,41 @@ MarkupSafe = ">=2.0"
[package.extras]
i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
version = "4.22.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"},
{file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"},
]
[package.dependencies]
attrs = ">=22.2.0"
jsonschema-specifications = ">=2023.03.6"
referencing = ">=0.28.4"
rpds-py = ">=0.7.1"
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
[[package]]
name = "jsonschema-specifications"
version = "2023.12.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.8"
files = [
{file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"},
{file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"},
]
[package.dependencies]
referencing = ">=0.31.0"
[[package]]
name = "lief"
version = "0.14.1"
@@ -462,6 +532,17 @@ files = [
{file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"},
]
[[package]]
name = "mccabe"
version = "0.7.0"
description = "McCabe checker, plugin for flake8"
optional = false
python-versions = ">=3.6"
files = [
{file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
[[package]]
name = "nodeenv"
version = "1.9.1"
@@ -475,13 +556,13 @@ files = [
[[package]]
name = "packaging"
version = "24.0"
version = "24.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"},
{file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"},
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]
[[package]]
@@ -573,6 +654,45 @@ typing-extensions = ">=4.5.0"
all = ["nodejs-bin"]
node = ["nodejs-bin"]
[[package]]
name = "psutil"
version = "5.9.8"
description = "Cross-platform lib for process and system monitoring in Python."
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
{file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"},
{file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"},
{file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"},
{file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"},
{file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"},
{file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"},
{file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"},
{file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"},
{file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"},
{file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"},
{file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"},
{file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"},
{file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"},
{file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"},
{file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"},
{file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"},
]
[package.extras]
test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
[[package]]
name = "pycodestyle"
version = "2.11.1"
description = "Python style guide checker"
optional = false
python-versions = ">=3.8"
files = [
{file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"},
{file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"},
]
[[package]]
name = "pydantic"
version = "2.7.3"
@@ -683,6 +803,17 @@ files = [
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pyflakes"
version = "3.2.0"
description = "passive checker of Python programs"
optional = false
python-versions = ">=3.8"
files = [
{file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"},
{file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"},
]
[[package]]
name = "pytest"
version = "8.2.2"
@@ -705,6 +836,39 @@ tomli = {version = ">=1", markers = "python_version < \"3.11\""}
[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-asyncio"
version = "0.23.7"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest_asyncio-0.23.7-py3-none-any.whl", hash = "sha256:009b48127fbe44518a547bddd25611551b0e43ccdbf1e67d12479f569832c20b"},
{file = "pytest_asyncio-0.23.7.tar.gz", hash = "sha256:5f5c72948f4c49e7db4f29f2521d4031f1c27f86e57b046126654083d4770268"},
]
[package.dependencies]
pytest = ">=7.0.0,<9"
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
[[package]]
name = "pytest-watcher"
version = "0.4.2"
description = "Automatically rerun your tests on file modifications"
optional = false
python-versions = "<4.0.0,>=3.7.0"
files = [
{file = "pytest_watcher-0.4.2-py3-none-any.whl", hash = "sha256:a43949ba67dd8d7e1fd0de5eea44a999081f0aec9f93b4e744264b4c6a3d9bbe"},
{file = "pytest_watcher-0.4.2.tar.gz", hash = "sha256:7b292f025ca19617cd7567c228c6187b5087f2da9e4d2cf6e144e5764a0471b0"},
]
[package.dependencies]
tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
watchdog = ">=2.0.0"
[[package]]
name = "python-dotenv"
version = "1.0.1"
@@ -744,7 +908,6 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -779,6 +942,155 @@ files = [
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]
[[package]]
name = "referencing"
version = "0.35.1"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"},
{file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"},
]
[package.dependencies]
attrs = ">=22.2.0"
rpds-py = ">=0.7.0"
[[package]]
name = "rpds-py"
version = "0.18.1"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.8"
files = [
{file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"},
{file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"},
{file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"},
{file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"},
{file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"},
{file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"},
{file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"},
{file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"},
{file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"},
{file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"},
{file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"},
{file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"},
{file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"},
{file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"},
{file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"},
{file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"},
{file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"},
{file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"},
{file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"},
{file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"},
{file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"},
{file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"},
{file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"},
{file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"},
{file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"},
{file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"},
{file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"},
{file = "rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"},
{file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"},
{file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"},
{file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"},
{file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"},
{file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"},
{file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"},
{file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"},
{file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"},
{file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"},
{file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"},
{file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"},
{file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"},
{file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"},
{file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"},
{file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"},
{file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"},
{file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"},
{file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"},
{file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"},
{file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"},
{file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"},
{file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"},
{file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"},
{file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"},
{file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"},
{file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"},
{file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"},
{file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"},
{file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"},
{file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"},
{file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"},
{file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"},
{file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"},
{file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"},
{file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"},
{file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"},
{file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"},
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"},
{file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"},
{file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"},
{file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"},
]
[[package]]
name = "ruff"
version = "0.4.8"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"},
{file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"},
{file = "ruff-0.4.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aad360893e92486662ef3be0a339c5ca3c1b109e0134fcd37d534d4be9fb8de3"},
{file = "ruff-0.4.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:284c2e3f3396fb05f5f803c9fffb53ebbe09a3ebe7dda2929ed8d73ded736deb"},
{file = "ruff-0.4.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7354f921e3fbe04d2a62d46707e569f9315e1a613307f7311a935743c51a764"},
{file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:72584676164e15a68a15778fd1b17c28a519e7a0622161eb2debdcdabdc71883"},
{file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9678d5c9b43315f323af2233a04d747409d1e3aa6789620083a82d1066a35199"},
{file = "ruff-0.4.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704977a658131651a22b5ebeb28b717ef42ac6ee3b11e91dc87b633b5d83142b"},
{file = "ruff-0.4.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05f8d6f0c3cce5026cecd83b7a143dcad503045857bc49662f736437380ad45"},
{file = "ruff-0.4.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6ea874950daca5697309d976c9afba830d3bf0ed66887481d6bca1673fc5b66a"},
{file = "ruff-0.4.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fc95aac2943ddf360376be9aa3107c8cf9640083940a8c5bd824be692d2216dc"},
{file = "ruff-0.4.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:384154a1c3f4bf537bac69f33720957ee49ac8d484bfc91720cc94172026ceed"},
{file = "ruff-0.4.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e9d5ce97cacc99878aa0d084c626a15cd21e6b3d53fd6f9112b7fc485918e1fa"},
{file = "ruff-0.4.8-py3-none-win32.whl", hash = "sha256:6d795d7639212c2dfd01991259460101c22aabf420d9b943f153ab9d9706e6a9"},
{file = "ruff-0.4.8-py3-none-win_amd64.whl", hash = "sha256:e14a3a095d07560a9d6769a72f781d73259655919d9b396c650fc98a8157555d"},
{file = "ruff-0.4.8-py3-none-win_arm64.whl", hash = "sha256:14019a06dbe29b608f6b7cbcec300e3170a8d86efaddb7b23405cb7f7dcaf780"},
{file = "ruff-0.4.8.tar.gz", hash = "sha256:16d717b1d57b2e2fd68bd0bf80fb43931b79d05a7131aa477d66fc40fbd86268"},
]
[[package]]
name = "setuptools"
version = "69.5.1"
@@ -863,13 +1175,13 @@ files = [
[[package]]
name = "typing-extensions"
version = "4.12.1"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.1-py3-none-any.whl", hash = "sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a"},
{file = "typing_extensions-4.12.1.tar.gz", hash = "sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1"},
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
@@ -942,6 +1254,50 @@ files = [
docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"]
test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"]
[[package]]
name = "watchdog"
version = "4.0.1"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.8"
files = [
{file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"},
{file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"},
{file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"},
{file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"},
{file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"},
{file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"},
{file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"},
{file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"},
{file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"},
{file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"},
{file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"},
{file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"},
{file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"},
{file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"},
{file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"},
{file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"},
{file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"},
{file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"},
{file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"},
{file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"},
{file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"},
{file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"},
{file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"},
{file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"},
{file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"},
]
[package.extras]
watchmedo = ["PyYAML (>=3.10)"]
[[package]]
name = "watchfiles"
version = "0.22.0"
@@ -1127,4 +1483,4 @@ test = ["pytest (>=6.0.0)", "setuptools (>=65)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "d999a99c717019087f238f4f1aeb89b1671a77e53f175108bf91fb85ac0be657"
content-hash = "391567de870dbbf86ea217ff6b15f7c6d2c9406707c196661d29f45deb886812"

View File

@@ -16,12 +16,18 @@ prisma = "^0.13.1"
pytest = "^8.2.1"
uvicorn = { extras = ["standard"], version = "^0.30.1" }
fastapi = "^0.109.0"
pytest-asyncio = "^0.23.7"
ruff = "^0.4.8"
flake8 = "^7.0.0"
jsonschema = "^4.22.0"
psutil = "^5.9.8"
[tool.poetry.group.dev.dependencies]
cx-freeze = "7.0.0"
poethepoet = "^0.26.1"
httpx = "^0.27.0"
pytest-watcher = "^0.4.2"
[build-system]
requires = ["poetry-core"]
@@ -29,6 +35,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry.scripts]
app = "autogpt_server.app:main"
cli = "autogpt_server.cli:start_cli"
# https://poethepoet.natn.io/index.html
[tool.poe]
@@ -46,8 +53,8 @@ install = ["build", "_dinstall"]
[tool.poe.tasks._dbuild]
cmd = "python setup.py build"
[tool.poe.tasks.dist_mac]
cmd = "python setup.py bdist_mac"
[tool.poe.tasks.dist_app]
cmd = "python setup.py bdist_app"
[tool.poe.tasks.dist_dmg]
cmd = "python setup.py bdist_dmg"
@@ -55,5 +62,20 @@ cmd = "python setup.py bdist_dmg"
[tool.poe.tasks.dist_msi]
cmd = "python setup.py bdist_msi"
[tool.poe.tasks.dist_appimage]
cmd = "python setup.py bdist_appimage"
[tool.poe.tasks.dist_deb]
cmd = "python setup.py bdist_deb"
[tool.poe.tasks._dinstall]
cmd = "python setup.py install"
[tool.pytest-watcher]
now = false
clear = true
delay = 0.2
runner = "pytest"
runner_args = []
patterns = ["*.py"]
ignore_patterns = []

View File

@@ -15,9 +15,6 @@ model AgentGraph {
name String?
description String?
startingAgentNodeId String
StartingAgentNode AgentNode @relation("AgentGraphRoot", fields: [startingAgentNodeId], references: [id])
AgentNodes AgentNode[] @relation("AgentGraphNodes")
}
@@ -32,30 +29,30 @@ model AgentNode {
AgentGraph AgentGraph @relation("AgentGraphNodes", fields: [agentGraphId], references: [id])
// List of consumed inputs that the parent node should provide.
Input AgentNodeLink[] @relation("AgentNodeInput")
Input AgentNodeLink[] @relation("AgentNodeSink")
// List of produced outputs that trigger execution of the child nodes.
Output AgentNodeLink[] @relation("AgentNodeOutput")
Output AgentNodeLink[] @relation("AgentNodeSource")
// JSON serialized dict[str, str] containing predefined input values.
constantInput String @default("{}")
ExecutionHistory AgentNodeExecution[]
// Prisma requires explicit back-references.
ReferencedByAgentGraphAsRoot AgentGraph[] @relation("AgentGraphRoot")
}
// This model describes the link between two AgentNodes.
model AgentNodeLink {
id String @id
agentNodeInputId String
AgentNodeInput AgentNode @relation("AgentNodeOutput", fields: [agentNodeInputId], references: [id]) // Output of the node is the input of the link.
agentNodeInputSchemaId String
AgentNodeInputSchema AgentBlockInputOutput @relation("AgentNodeInputSchema", fields: [agentNodeInputSchemaId], references: [id])
// Output of a node is connected to the source of the link.
agentNodeSourceId String
AgentNodeSource AgentNode @relation("AgentNodeSource", fields: [agentNodeSourceId], references: [id])
sourceName String
agentNodeOutputId String
AgentNodeOutput AgentNode @relation("AgentNodeInput", fields: [agentNodeOutputId], references: [id]) // Input of the node is the output of the link.
agentNodeOutputSchemaId String
AgentNodeOutputSchema AgentBlockInputOutput @relation("AgentNodeOutputSchema", fields: [agentNodeOutputSchemaId], references: [id])
// Input of a node is connected to the sink of the link.
agentNodeSinkId String
AgentNodeSink AgentNode @relation("AgentNodeSink", fields: [agentNodeSinkId], references: [id])
sinkName String
}
// This model describes a component that will be executed by the AgentNode.
@@ -63,49 +60,35 @@ model AgentBlock {
id String @id
name String @unique
// We allow a block to have multiple types of output.
Input AgentBlockInputOutput[] @relation("AgentBlockInput")
Output AgentBlockInputOutput[] @relation("AgentBlockOutput")
// We allow a block to have multiple types of input & output.
// Serialized object-typed `jsonschema` with top-level properties as input/output names.
inputSchema String
outputSchema String
// Prisma requires explicit back-references.
ReferencedByAgentNode AgentNode[]
}
// This model describes the output (produced event) or input (consumed event) of an AgentBlock.
model AgentBlockInputOutput {
id String @id
name String
schema String
description String
// Prisma requires explicit back-references.
ReferencedByAgentBlockInput AgentBlock[] @relation("AgentBlockInput")
ReferencedByAgentBlockOutput AgentBlock[] @relation("AgentBlockOutput")
ReferencedByAgentNodeLinkAsInput AgentNodeLink[] @relation("AgentNodeInputSchema")
ReferencedByAgentNodeLinkAsOutput AgentNodeLink[] @relation("AgentNodeOutputSchema")
ReferencedByAgentNodeExecution AgentNodeExecution[]
}
// This model describes the execution of an AgentNode.
model AgentNodeExecution {
id String @id
id String @id
executionId String
agentNodeId String
AgentNode AgentNode @relation(fields: [agentNodeId], references: [id])
- inputData String
- inputFiles FileDefinition[] @relation("InputFiles")
- outputData String
- outputFiles FileDefinition[] @relation("OutputFiles")
- outputTypeId String?
- outputType AgentBlockInputOutput? @relation(fields: [outputTypeId], references: [id])
+ inputData String?
+ inputFiles FileDefinition[] @relation("InputFiles")
+ outputName String?
+ outputData String?
+ outputFiles FileDefinition[] @relation("OutputFiles")
// SQLite does not support enums
- // enum Status { STARTED, RUNNING, SUCCESS, FAILED }
+ // enum Status { QUEUED, RUNNING, SUCCESS, FAILED }
executionStatus String
- // JSON serialized object of the execution state: information required to resume the execution.
- executionStateData String
creationTime DateTime
startTime DateTime?
endTime DateTime?
}
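// A hedged note on executionStatus: with enums unavailable on SQLite, the
// string is presumably constrained in application code to one of "QUEUED",
// "RUNNING", "SUCCESS", or "FAILED", mirroring the commented-out enum above.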
// This model describes a file that can be used as input/output of an AgentNodeExecution.

View File

@@ -20,12 +20,20 @@ icon = (
setup(
name="AutoGPT Server",
url="https://agpt.co",
# The entry points of the application
executables=[
Executable(
"autogpt_server/app.py", target_name="server", base="console", icon=icon
"autogpt_server/app.py",
target_name="agpt_server",
base="console",
icon=icon,
),
+ Executable(
+     "autogpt_server/cli.py", target_name="agpt_server_cli", base="console", icon=icon
+ ),
],
options={
# Options for building all the executables
"build_exe": {
"packages": packages,
"includes": [
@@ -35,22 +43,36 @@ setup(
"uvicorn.protocols.websockets.auto",
"uvicorn.lifespan.on",
],
# Exclude the `two` module from readability.compat, as it causes issues
"excludes": ["readability.compat.two"],
},
# Mac .app specific options
"bdist_mac": {
"bundle_name": "AutoGPT",
"iconfile": "../../assets/gpt_dark_RGB.icns",
# "include_resources": ["IMG_3775.jpeg"],
},
# Mac .dmg specific options
"bdist_dmg": {
"applications_shortcut": True,
"volume_label": "AutoGPT Server",
"volume_label": "AutoGPTServer",
},
+ # Windows .msi specific options
+ "bdist_msi": {
+     "target_name": "AutoGPTServer",
+     "add_to_path": True,
+     "install_icon": "../../assets/gpt_dark_RGB.ico",
+ },
+ # Linux .appimage specific options
+ "bdist_appimage": {},
+ # Linux .rpm specific options
+ "bdist_rpm": {
+     "name": "AutoGPTServer",
+     "description": "AutoGPT Server",
+     "version": "0.1",
+     "license": "UNKNOWNORPROPRIETARY",
+     "url": "https://agpt.co",
+     "long_description": "AutoGPT Server",
+ },
},
)
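# Hedged usage sketch: assuming this setup() drives cx_Freeze (implied by the
# Executable entries and the build_exe/bdist_* option groups), each options key
# above maps to a build command, e.g.:
#   python setup.py build_exe       # standalone executables
#   python setup.py bdist_mac       # macOS .app bundle
#   python setup.py bdist_dmg       # macOS .dmg image
#   python setup.py bdist_msi       # Windows .msi installer
#   python setup.py bdist_appimage  # Linux AppImage
#   python setup.py bdist_rpm       # Linux .rpm package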

View File

@@ -1,30 +1,97 @@
import pytest
- from fastapi.testclient import TestClient
- from autogpt_server.data import ExecutionQueue
- from autogpt_server.executor import start_executor_manager
- from autogpt_server.server import start_server
+ from autogpt_server.data import block, db, graph
+ from autogpt_server.data.execution import ExecutionQueue, add_execution
+ from autogpt_server.executor import executor
+ from autogpt_server.server import server

- @pytest.fixture
- def client():
-     execution_queue = ExecutionQueue()
-     start_executor_manager(5, execution_queue)
-     return TestClient(start_server(execution_queue, use_uvicorn=False))
+ async def create_test_graph() -> graph.Graph:
+     """
+     ParrotBlock
+               \
+                ---- TextCombinerBlock ---- PrintingBlock
+               /
+     ParrotBlock
+     """
+     nodes = [
+         graph.Node(block_id=block.ParrotBlock.id),
+         graph.Node(block_id=block.ParrotBlock.id),
+         graph.Node(
+             block_id=block.TextCombinerBlock.id,
+             input_default={"format": "{text1},{text2}"}
+         ),
+         graph.Node(block_id=block.PrintingBlock.id),
+     ]
+     nodes[0].connect(nodes[2], "output", "text1")
+     nodes[1].connect(nodes[2], "output", "text2")
+     nodes[2].connect(nodes[3], "combined_text", "text")
+
+     test_graph = graph.Graph(
+         name="TestGraph",
+         description="Test graph",
+         nodes=nodes,
+     )
+     await block.initialize_blocks()
+     result = await graph.create_graph(test_graph)
+
+     # Assertions
+     assert result.name == test_graph.name
+     assert result.description == test_graph.description
+     assert len(result.nodes) == len(test_graph.nodes)
+     return result
- def test_execute_agent(client: TestClient):
-     # Assert API is working
-     response = client.post("/agents/dummy_agent_1/execute")
-     assert response.status_code == 200

+ async def execute_node(queue: ExecutionQueue) -> dict | None:
+     next_exec = await executor.execute_node(queue.get())
+     if not next_exec:
+         return None
+     await add_execution(next_exec, queue)
+     return next_exec.data

-     # Assert response is correct
-     data = response.json()
-     exec_id = data["execution_id"]
-     agent_id = data["agent_id"]
-     assert agent_id == "dummy_agent_1"
-     assert isinstance(exec_id, str)
-     assert len(exec_id) == 36
-     # TODO: Add assertion that the executor is executed after some time
-     # Add this when db integration is done.
+ @pytest.mark.asyncio
+ async def test_agent_execution():
+     await db.connect()
+     test_graph = await create_test_graph()
+     test_queue = ExecutionQueue()
+     test_server = server.AgentServer(test_queue)
+
+     # --- Test adding new executions --- #
+     text = "Hello, World!"
+     input_data = {"input": text}
+     executions = await test_server.execute_agent(test_graph.id, input_data)
+
+     # 2 executions should be created, one for each ParrotBlock, with the same run_id.
+     assert len(executions) == 2
+     assert executions[0].run_id == executions[1].run_id
+     assert executions[0].node_id != executions[1].node_id
+     assert executions[0].data == executions[1].data == input_data
+
+     # --- Test executing the added tasks --- #
+     # Executing ParrotBlock1; TextCombinerBlock won't be enqueued yet.
+     assert not test_queue.empty()
+     next_execution = await execute_node(test_queue)
+     assert next_execution is None
+
+     # Executing ParrotBlock2; TextCombinerBlock will be enqueued.
+     assert not test_queue.empty()
+     next_execution = await execute_node(test_queue)
+     assert test_queue.empty()
+     assert next_execution
+     assert next_execution.keys() == {"text1", "text2", "format"}
+     assert next_execution["text1"] == text
+     assert next_execution["text2"] == text
+     assert next_execution["format"] == "{text1},{text2}"
+
+     # Executing TextCombinerBlock; PrintingBlock will be enqueued.
+     next_execution = await execute_node(test_queue)
+     assert next_execution
+     assert next_execution.keys() == {"text"}
+     assert next_execution["text"] == f"{text},{text}"
+
+     # Executing PrintingBlock; no more tasks will be enqueued.
+     next_execution = await execute_node(test_queue)
+     assert next_execution is None
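# A minimal sketch (not part of this commit) of the drain loop the test above
# performs step by step, assuming executor.execute_node returns the follow-up
# execution, or None when nothing new is ready:
async def drain(queue: ExecutionQueue) -> None:
    # Run queued node executions until no follow-up work remains.
    while not queue.empty():
        next_exec = await executor.execute_node(queue.get())
        if next_exec:
            await add_execution(next_exec, queue)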