Compare commits


2 Commits

Author  SHA1        Message                  Date
Luke    bb3a06d54b  Release v0.4.7 (#5094)   2023-08-11 13:49:39 -04:00
Luke    ec47a318bc  Release v0.4.6 (#5039)   2023-07-28 14:37:32 +02:00
3239 changed files with 16870 additions and 784647 deletions


@@ -10,4 +10,4 @@ RUN apt-get update && apt-get install -y \
RUN apt-get install -y curl jq wget git
# Declare working directory
WORKDIR /workspace/AutoGPT
WORKDIR /workspace/Auto-GPT


@@ -1,7 +1,7 @@
{
"dockerComposeFile": "./docker-compose.yml",
"service": "auto-gpt",
"workspaceFolder": "/workspace/AutoGPT",
"workspaceFolder": "/workspace/Auto-GPT",
"shutdownAction": "stopCompose",
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
@@ -46,11 +46,11 @@
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "poetry install",
// "postCreateCommand": "pip3 install --user -r requirements.txt",
// Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "vscode",
// Add the freshly containerized repo to the list of safe repositories
"postCreateCommand": "git config --global --add safe.directory /workspace/AutoGPT && poetry install"
}
"postCreateCommand": "git config --global --add safe.directory /workspace/Auto-GPT && pip3 install --user -r requirements.txt"
}


@@ -9,4 +9,4 @@ services:
context: ../
tty: true
volumes:
- ../:/workspace/AutoGPT
- ../:/workspace/Auto-GPT

9  .dockerignore Normal file

@@ -0,0 +1,9 @@
.*
*.template
*.yaml
*.yml
!prompt_settings.yaml
*.md
*.png
!BULLETIN.md


@@ -1,61 +1,50 @@
# For further descriptions of these settings see docs/configuration/options.md or go to docs.agpt.co
################################################################################
### AutoGPT - GENERAL SETTINGS
### AUTO-GPT - GENERAL SETTINGS
################################################################################
## OPENAI_API_KEY - OpenAI API Key (Example: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# OPENAI_API_KEY=
## ANTHROPIC_API_KEY - Anthropic API Key (Example: sk-ant-api03-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# ANTHROPIC_API_KEY=
## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
# TELEMETRY_OPT_IN=true
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
OPENAI_API_KEY=your-openai-api-key
## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False
### Workspace ###
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./data/agents/<agent_id>/workspace (Default: True)
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# RESTRICT_TO_WORKSPACE=True
## DISABLED_COMMANDS - The comma separated list of commands that are disabled (Default: None)
# DISABLED_COMMANDS=
## FILE_STORAGE_BACKEND - Choose a storage backend for contents
## Options: local, gcs, s3
# FILE_STORAGE_BACKEND=local
## STORAGE_BUCKET - GCS/S3 Bucket to store contents in
# STORAGE_BUCKET=autogpt
## GCS Credentials
# see https://cloud.google.com/storage/docs/authentication#libauth
## AWS/S3 Credentials
# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
## S3_ENDPOINT_URL - If you're using non-AWS S3, set your endpoint here.
# S3_ENDPOINT_URL=
### Miscellaneous ###
## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the AutoGPT root directory. (defaults to ai_settings.yaml)
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the Auto-GPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the AutoGPT root directory. (defaults to prompt_settings.yaml)
## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the Auto-GPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the Auto-GPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1
## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
# OPENAI_FUNCTIONS=False
## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y
## EXIT_KEY - Key to exit AutoGPT
## EXIT_KEY - Key to exit AUTO-GPT
# EXIT_KEY=n
## PLAIN_OUTPUT - Plain output, which disables the spinner (Default: False)
# PLAIN_OUTPUT=False
## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
# DISABLED_COMMAND_CATEGORIES=
################################################################################
### LLM PROVIDER
################################################################################
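The storage settings above (FILE_STORAGE_BACKEND, STORAGE_BUCKET, S3_ENDPOINT_URL) map onto a boto3 client in a straightforward way. A minimal sketch, assuming credentials come from the standard AWS_* environment variables; the bucket fallback and object key are illustrative, not AutoGPT's actual storage code:

```python
import os

import boto3  # AWS SDK for Python; reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY from the env

# S3_ENDPOINT_URL is only needed for non-AWS S3 (e.g. MinIO); None selects the AWS default.
s3 = boto3.client("s3", endpoint_url=os.getenv("S3_ENDPOINT_URL") or None)

bucket = os.getenv("STORAGE_BUCKET", "autogpt")
s3.put_object(Bucket=bucket, Key="example/hello.txt", Body=b"hello")  # illustrative key
```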
@@ -63,41 +52,28 @@
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# TEMPERATURE=0
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1
# OPENAI_API_TYPE=
# OPENAI_API_VERSION=
## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## Note: this feature is only supported by OpenAI's newer models.
# OPENAI_FUNCTIONS=False
## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
# OPENAI_ORGANIZATION=
## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False
## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the folder containing this file. (Default: azure.yaml)
## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the Auto-GPT root directory. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml
# AZURE_OPENAI_AD_TOKEN=
# AZURE_OPENAI_ENDPOINT=
################################################################################
### LLM MODELS
################################################################################
## SMART_LLM - Smart language model (Default: gpt-4-turbo)
# SMART_LLM=gpt-4-turbo
## SMART_LLM - Smart language model (Default: gpt-4)
# SMART_LLM=gpt-4
## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo
## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-3-small
# EMBEDDING_MODEL=text-embedding-ada-002
################################################################################
### SHELL EXECUTION
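The OPENAI_FUNCTIONS option above enables OpenAI's function-calling API. A minimal sketch using the pre-1.0 openai-python library that was current at the time of these releases; the weather function schema is purely illustrative and not part of AutoGPT:

```python
import openai  # openai-python < 1.0, contemporary with v0.4.x

functions = [{
    "name": "get_weather",  # illustrative function, not part of AutoGPT
    "description": "Get the current weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}]

response = openai.ChatCompletion.create(
    model="gpt-4-0613",  # the '-0613' suffix mentioned above: first models with function support
    messages=[{"role": "user", "content": "What is the weather in Paris?"}],
    functions=functions,
)
print(response["choices"][0]["message"].get("function_call"))
```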
@@ -107,13 +83,39 @@
# SHELL_COMMAND_CONTROL=denylist
## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by AutoGPT (Default: sudo,su)
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by Auto-GPT (Default: sudo,su)
# SHELL_DENYLIST=sudo,su
## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by AutoGPT (Default: None)
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by Auto-GPT (Default: None)
# SHELL_ALLOWLIST=
################################################################################
### MEMORY
################################################################################
### General
## MEMORY_BACKEND - Memory backend type
# MEMORY_BACKEND=json_file
## MEMORY_INDEX - Value used in the Memory backend for scoping, naming, or indexing (Default: auto-gpt)
# MEMORY_INDEX=auto-gpt
### Redis
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
# REDIS_HOST=localhost
## REDIS_PORT - Redis port (Default: 6379)
# REDIS_PORT=6379
## REDIS_PASSWORD - Redis password (Default: "")
# REDIS_PASSWORD=
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# WIPE_REDIS_ON_START=True
################################################################################
### IMAGE GENERATION PROVIDER
################################################################################
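The Redis memory settings above translate directly into a redis-py client. A minimal sketch, assuming the redis package is installed; the flush-on-start mirrors the WIPE_REDIS_ON_START description:

```python
import os

import redis  # redis-py client

r = redis.Redis(
    host=os.getenv("REDIS_HOST", "localhost"),  # use "redis" under docker-compose
    port=int(os.getenv("REDIS_PORT", "6379")),
    password=os.getenv("REDIS_PASSWORD", "") or None,
)

if os.getenv("WIPE_REDIS_ON_START", "True").lower() == "true":
    r.flushall()  # wipes data / index on start, per the option above
```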
@@ -189,12 +191,13 @@
################################################################################
## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
## Options: gtts, streamelements, elevenlabs, macos
# TEXT_TO_SPEECH_PROVIDER=gtts
### Only if TEXT_TO_SPEECH_PROVIDER=streamelements
## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
# STREAMELEMENTS_VOICE=Brian
### Only if TEXT_TO_SPEECH_PROVIDER=elevenlabs
## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
# ELEVENLABS_API_KEY=
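With the default gtts provider above, synthesis is a one-liner via the gTTS package. A minimal sketch; the phrase and output filename are illustrative:

```python
from gtts import gTTS  # Google Text-to-Speech, the default TEXT_TO_SPEECH_PROVIDER

gTTS("All systems go.", lang="en").save("speech.mp3")  # illustrative phrase and filename
```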
@@ -207,33 +210,3 @@
## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False
################################################################################
### LOGGING
################################################################################
## LOG_LEVEL - Set the minimum level to filter log output by. Setting this to DEBUG implies LOG_FORMAT=debug, unless LOG_FORMAT is set explicitly.
## Options: DEBUG, INFO, WARNING, ERROR, CRITICAL
# LOG_LEVEL=INFO
## LOG_FORMAT - The format in which to log messages to the console (and log files).
## Options: simple, debug, structured_google_cloud
# LOG_FORMAT=simple
## LOG_FILE_FORMAT - Normally follows the LOG_FORMAT setting, but can be set separately.
## Note: Log file output is disabled if LOG_FORMAT=structured_google_cloud.
# LOG_FILE_FORMAT=simple
## PLAIN_OUTPUT - Disables animated typing and the spinner in the console output. (Default: False)
# PLAIN_OUTPUT=False
################################################################################
### Agent Protocol Server Settings
################################################################################
## AP_SERVER_PORT - Specifies what port the agent protocol server will listen on. (Default: 8000)
## AP_SERVER_DB_URL - Specifies what connection url the agent protocol database will connect to (Default: Internal SQLite)
## AP_SERVER_CORS_ALLOWED_ORIGINS - Comma separated list of allowed origins for CORS. (Default: http://localhost:{AP_SERVER_PORT})
# AP_SERVER_PORT=8000
# AP_SERVER_DB_URL=sqlite:///data/ap_server.db
# AP_SERVER_CORS_ALLOWED_ORIGINS=
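The LOG_LEVEL handling above can be pictured with the standard logging module. A minimal sketch using a stand-in format string; AutoGPT's actual logging setup (including the structured_google_cloud format) is more elaborate:

```python
import logging
import os

level_name = os.getenv("LOG_LEVEL", "INFO").upper()
logging.basicConfig(
    level=getattr(logging, level_name, logging.INFO),
    format="%(asctime)s %(levelname)s %(message)s",  # stand-in for the 'simple' format
)
logging.getLogger("autogpt").info("Logging configured at %s level", level_name)
```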


@@ -1,4 +1,4 @@
# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use AutoGPT.
# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT.
[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt


@@ -1,5 +1,6 @@
[flake8]
max-line-length = 88
select = "E303, W293, W291, W292, E305, E231, E302"
exclude =
.tox,
__pycache__,
@@ -9,4 +10,3 @@ exclude =
.venv/*,
reports/*,
dist/*,
data/*,

6  .gitattributes vendored

@@ -1,3 +1,5 @@
frontend/build/* linguist-generated
# Exclude VCR cassettes from stats
tests/Auto-GPT-test-cassettes/**/**.y*ml linguist-generated
**/poetry.lock linguist-generated
# Mark documentation as such
docs/**.md linguist-documentation

7  .github/CODEOWNERS vendored

@@ -1,5 +1,2 @@
.github/workflows/ @Significant-Gravitas/devops
autogpts/autogpt/ @Significant-Gravitas/maintainers
autogpts/forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
.github/workflows/ @Significant-Gravitas/maintainers
autogpt/core @collijk

3  .github/FUNDING.yml vendored Normal file

@@ -0,0 +1,3 @@
# These are supported funding model platforms
github: Torantulino


@@ -1,5 +1,5 @@
name: Bug report 🐛
description: Create a bug report for AutoGPT.
description: Create a bug report for Auto-GPT.
labels: ['status: needs triage']
body:
- type: markdown
@@ -13,16 +13,16 @@ body:
[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
[discord]: https://discord.gg/autogpt
[discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
[discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
[existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
[existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
- type: checkboxes
attributes:
label: ⚠️ Search for existing issues first ⚠️
description: >
Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues)
Please [search the history](https://github.com/Torantulino/Auto-GPT/issues)
to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues, and there is no existing issue for my problem
@@ -35,8 +35,8 @@ body:
A good rule of thumb: What would you type if you were searching for the issue?
For example:
BAD - my AutoGPT keeps looping
GOOD - After performing execute_python_file, AutoGPT goes into a loop where it keeps trying to execute the file.
BAD - my auto-gpt keeps looping
GOOD - After performing execute_python_file, auto-gpt goes into a loop where it keeps trying to execute the file.
⚠️ SUPER-busy repo, please help the volunteer maintainers.
The less time we spend here, the more time we can spend building AutoGPT.
@@ -54,7 +54,7 @@ body:
attributes:
label: Which Operating System are you using?
description: >
Please select the operating system you were using to run AutoGPT when this problem occurred.
Please select the operating system you were using to run Auto-GPT when this problem occurred.
options:
- Windows
- Linux
@@ -73,12 +73,12 @@ body:
- type: dropdown
attributes:
label: Which version of AutoGPT are you using?
label: Which version of Auto-GPT are you using?
description: |
Please select which version of AutoGPT you were using when this issue occurred.
If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/AutoGPT/releases/) make sure you were using the latest code.
**If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/AutoGPT/releases/)**.
If installed with git you can run `git branch` to see which version of AutoGPT you are running.
Please select which version of Auto-GPT you were using when this issue occurred.
If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code.
**If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**.
If installed with git you can run `git branch` to see which version of Auto-GPT you are running.
options:
- Latest Release
- Stable (branch)
@@ -90,8 +90,8 @@ body:
attributes:
label: Do you use OpenAI GPT-3 or GPT-4?
description: >
If you are using AutoGPT with `--gpt3only`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- GPT-3.5
- GPT-4
@@ -129,7 +129,7 @@ body:
- type: textarea
attributes:
label: Describe your issue.
description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
validations:
required: true
@@ -139,16 +139,16 @@ body:
value: |
The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
"The log files are located in the folder 'logs' inside the main AutoGPT folder."
"The log files are located in the folder 'logs' inside the main auto-gpt folder."
- type: textarea
attributes:
label: Upload Activity Log Content
description: |
Upload the activity log content, this can help us understand the issue better.
To do this, go to the folder logs in your main AutoGPT folder, open activity.log and copy/paste the contents to this field.
⚠️ The activity log may contain personal data given to AutoGPT by you in prompt or input as well as
any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
To do this, go to the folder logs in your main auto-gpt folder, open activity.log and copy/paste the contents to this field.
⚠️ The activity log may contain personal data given to auto-gpt by you in prompt or input as well as
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false
@@ -157,8 +157,8 @@ body:
label: Upload Error Log Content
description: |
Upload the error log content, this will help us understand the issue better.
To do this, go to the folder logs in your main AutoGPT folder, open error.log and copy/paste the contents to this field.
⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as
any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
To do this, go to the folder logs in your main auto-gpt folder, open error.log and copy/paste the contents to this field.
⚠️ The error log may contain personal data given to auto-gpt by you in prompt or input as well as
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false


@@ -1,5 +1,5 @@
name: Feature request 🚀
description: Suggest a new idea for AutoGPT!
description: Suggest a new idea for Auto-GPT!
labels: ['status: needs triage']
body:
- type: markdown
@@ -10,7 +10,7 @@ body:
- type: checkboxes
attributes:
label: Duplicates
description: Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues) to see if an issue already exists for the same problem.
description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues
required: true


@@ -1,31 +1,49 @@
### Background
<!-- Clearly explain the need for these changes: -->
### Changes 🏗️
<!-- Concisely describe all of the changes made in this pull request: -->
### PR Quality Scorecard ✨
<!--
Check out our contribution guide:
https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
1. Avoid duplicate work, issues, PRs etc.
2. Also consider contributing something other than code; see the [contribution guide]
for options.
3. Clearly explain your changes.
4. Avoid making unnecessary changes, especially if they're purely based on personal
preferences. Doing so is the maintainers' job. ;-)
<!-- ⚠️ At the moment any non-essential commands are not being merged.
If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
We are expecting to ship plugin support within the week (PR #757).
Resources:
* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
-->
- [x] Have you used the PR description template? &ensp; `+2 pts`
- [ ] Is your pull request atomic, focusing on a single change? &ensp; `+5 pts`
- [ ] Have you linked the GitHub issue(s) that this PR addresses? &ensp; `+5 pts`
- [ ] Have you documented your changes clearly and comprehensively? &ensp; `+5 pts`
- [ ] Have you changed or added a feature? &ensp; `-4 pts`
- [ ] Have you added/updated corresponding documentation? &ensp; `+4 pts`
- [ ] Have you added/updated corresponding integration tests? &ensp; `+5 pts`
- [ ] Have you changed the behavior of AutoGPT? &ensp; `-5 pts`
- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance? &ensp; `+10 pts`
<!-- 📢 Announcement
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:
Focus on a single, specific change.
Do not include any unrelated or "extra" modifications.
Provide clear documentation and explanations of the changes made.
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
### Background
<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->
### Changes
<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->
### Documentation
<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->
### Test Plan
<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->
### PR Quality Checklist
- [ ] My pull request is atomic and focuses on a single change.
- [ ] I have thoroughly tested my changes with multiple different prompts.
- [ ] I have considered potential risks and mitigations for my changes.
- [ ] I have documented my changes clearly and comprehensively.
- [ ] I have not snuck in any "extra" small tweaks changes. <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
- [ ] I have run the following commands against my code to ensure it passes our linters:
```shell
black .
isort .
mypy
autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
```
<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->

23  .github/labeler.yml vendored

@@ -1,23 +0,0 @@
AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: autogpts/autogpt/**
Forge:
- changed-files:
- any-glob-to-any-file: autogpts/forge/**
Benchmark:
- changed-files:
- any-glob-to-any-file: benchmark/**
Frontend:
- changed-files:
- any-glob-to-any-file: frontend/**
Arena:
- changed-files:
- any-glob-to-any-file: arena/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**


@@ -1,169 +0,0 @@
name: Arena intake
on:
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
pull_request_target:
types: [ opened, synchronize ]
paths:
- 'arena/**'
jobs:
check:
permissions:
pull-requests: write
runs-on: ubuntu-latest
steps:
- name: Checkout PR
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Check Arena entry
uses: actions/github-script@v7
with:
script: |
console.log('⚙️ Setting up...');
const fs = require('fs');
const path = require('path');
const pr = context.payload.pull_request;
const isFork = pr.head.repo.fork;
console.log('🔄️ Fetching PR diff metadata...');
const prFilesChanged = (await github.rest.pulls.listFiles({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
})).data;
console.debug(prFilesChanged);
const arenaFilesChanged = prFilesChanged.filter(
({ filename: file }) => file.startsWith('arena/') && file.endsWith('.json')
);
const hasChangesInAutogptsFolder = prFilesChanged.some(
({ filename }) => filename.startsWith('autogpts/')
);
console.log(`🗒️ ${arenaFilesChanged.length} arena entries affected`);
console.debug(arenaFilesChanged);
if (arenaFilesChanged.length === 0) {
// If no files in `arena/` are changed, this job does not need to run.
return;
}
let close = false;
let flagForManualCheck = false;
let issues = [];
if (isFork) {
if (arenaFilesChanged.length > 1) {
// Impacting multiple entries in `arena/` is not allowed
issues.push('This pull request impacts multiple arena entries');
}
if (hasChangesInAutogptsFolder) {
// PRs that include the custom agent are generally not allowed
issues.push(
'This pull request includes changes in `autogpts/`.\n'
+ 'Please make sure to only submit your arena entry (`arena/*.json`), '
+ 'and not to accidentally include your custom agent itself.'
);
}
}
if (arenaFilesChanged.length === 1) {
const newArenaFile = arenaFilesChanged[0]
const newArenaFileName = path.basename(newArenaFile.filename)
console.log(`🗒️ Arena entry in PR: ${newArenaFile}`);
if (newArenaFile.status != 'added') {
flagForManualCheck = true;
}
if (pr.mergeable != false) {
const newArenaEntry = JSON.parse(fs.readFileSync(newArenaFile.filename));
const allArenaFiles = await (await glob.create('arena/*.json')).glob();
console.debug(newArenaEntry);
console.log(`➡️ Checking ${newArenaFileName} against existing entries...`);
for (const file of allArenaFiles) {
const existingEntryName = path.basename(file);
if (existingEntryName === newArenaFileName) {
continue;
}
console.debug(`Checking against ${existingEntryName}...`);
const arenaEntry = JSON.parse(fs.readFileSync(file));
if (arenaEntry.github_repo_url === newArenaEntry.github_repo_url) {
console.log(`⚠️ Duplicate detected: ${existingEntryName}`);
issues.push(
`The \`github_repo_url\` specified in __${newArenaFileName}__ `
+ `already exists in __${existingEntryName}__. `
+ `This PR will be closed as duplicate.`
)
close = true;
}
}
} else {
console.log('⚠️ PR has conflicts');
issues.push(
`__${newArenaFileName}__ conflicts with existing entry with the same name`
)
close = true;
}
} // end if (arenaFilesChanged.length === 1)
console.log('🏁 Finished checking against existing entries');
if (issues.length == 0) {
console.log('✅ No issues detected');
if (flagForManualCheck) {
console.log('🤔 Requesting review from maintainers...');
await github.rest.pulls.requestReviewers({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
reviewers: ['Pwuts'],
// team_reviewers: ['maintainers'], // doesn't work: https://stackoverflow.com/a/64977184/4751645
});
} else {
console.log('➡️ Approving PR...');
await github.rest.pulls.createReview({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
event: 'APPROVE',
});
}
} else {
console.log(`⚠️ ${issues.length} issues detected`);
console.log('➡️ Posting comment indicating issues...');
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr.number,
body: `Our automation found one or more issues with this submission:\n`
+ issues.map(i => `- ${i.replace('\n', '\n ')}`).join('\n'),
});
console.log("➡️ Applying label 'invalid'...");
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr.number,
labels: ['invalid'],
});
if (close) {
console.log('➡️ Auto-closing PR...');
await github.rest.pulls.update({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
state: 'closed',
});
}
}


@@ -1,296 +0,0 @@
name: AutoGPT Python CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
concurrency:
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpts/autogpt
jobs:
lint:
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }}
- name: Install Python dependencies
run: |
curl -sSL https://install.python-poetry.org | python3 -
poetry install
- name: Lint with flake8
run: poetry run flake8
- name: Check black formatting
run: poetry run black . --check
if: success() || failure()
- name: Check isort formatting
run: poetry run isort . --check
if: success() || failure()
# - name: Check mypy formatting
# run: poetry run mypy
# if: success() || failure()
# - name: Check for unused imports and pass statements
# run: |
# cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
# poetry run $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.'
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Configure git user Auto-GPT-Bot
run: |
git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co"
- name: Checkout cassettes
if: ${{ startsWith(github.event_name, 'pull_request') }}
env:
PR_BASE: ${{ github.event.pull_request.base.ref }}
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
cassette_base_branch="${PR_BASE}"
cd tests/vcr_cassettes
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi
if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin $cassette_base_branch
git checkout $cassette_branch
# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '$cassette_base_branch'."
fi
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpts/autogpt/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
tests/unit tests/integration
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}
- id: setup_git_auth
name: Set up git token authentication
# Cassettes may be pushed even when tests fail
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
if [ "${{ runner.os }}" = 'macOS' ]; then
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
else
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
fi
git config "$config_key" \
"Authorization: Basic $base64_pat"
cd tests/vcr_cassettes
git config "$config_key" \
"Authorization: Basic $base64_pat"
echo "config_key=$config_key" >> $GITHUB_OUTPUT
- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
env:
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
is_pull_request=true
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
else
cassette_branch="${{ github.ref_name }}"
fi
cd tests/vcr_cassettes
# Commit & push changes to cassettes if any
if ! git diff --quiet; then
git add .
git commit -m "Auto-update cassettes"
git push origin HEAD:$cassette_branch
if [ ! $is_pull_request ]; then
cd ../..
git add tests/vcr_cassettes
git commit -m "Update cassette submodule"
git push origin HEAD:$cassette_branch
fi
echo "updated=true" >> $GITHUB_OUTPUT
else
echo "updated=false" >> $GITHUB_OUTPUT
echo "No cassette changes to commit"
fi
- name: Post Set up git token auth
if: steps.setup_git_auth.outcome == 'success'
run: |
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
- name: Apply "behaviour change" label and comment on PR
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
PR_NUMBER="${{ github.event.pull_request.number }}"
TOKEN="${{ secrets.PAT_REVIEW }}"
REPO="${{ github.repository }}"
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
echo "Adding label and comment..."
echo $TOKEN | gh auth login --with-token
gh issue edit $PR_NUMBER --add-label "behaviour change"
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: autogpts/autogpt/logs/


@@ -1,165 +0,0 @@
name: AutoGPT Docker CI
on:
push:
branches: [ master, development ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
concurrency:
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
working-directory: autogpts/autogpt
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
DEV_IMAGE_TAG: latest-dev
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- if: runner.debug
run: |
ls -al
du -hs *
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref_type }}
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
working-directory: ./
continue-on-error: true
test:
runs-on: ubuntu-latest
timeout-minutes: 10
services:
minio:
image: minio/minio:edge-cicd
options: >
--name=minio
--health-interval=10s --health-timeout=5s --health-retries=3
--health-cmd="curl -f http://localhost:9000/minio/health/live"
steps:
- name: Check out repository
uses: actions/checkout@v4
with:
submodules: true
- if: github.event_name == 'push'
name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max
- id: test
name: Run tests
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: http://minio:9000
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
run: |
set +e
docker run --env CI --env OPENAI_API_KEY \
--network container:minio \
--env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
--entrypoint poetry ${{ env.IMAGE_NAME }} run \
pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
--numprocesses=4 --durations=10 \
tests/unit tests/integration 2>&1 | tee test_output.txt
test_failure=${PIPESTATUS[0]}
cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$(cat test_output.txt)
\`\`\`
$EOF
exit $test_failure
- if: github.event_name == 'push' && github.ref_name == 'master'
name: Push image to Docker Hub
run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}


@@ -1,97 +0,0 @@
name: AutoGPTs Nightly Benchmark
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
jobs:
benchmark:
permissions:
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 120
env:
min-python-version: '3.10'
REPORTS_BRANCH: data/benchmark-reports
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python -
- name: Prepare reports folder
run: mkdir -p ${{ env.REPORTS_FOLDER }}
- run: poetry -C benchmark install
- name: Benchmark ${{ matrix.agent-name }}
run: |
./run agent start ${{ matrix.agent-name }}
cd autogpts/${{ matrix.agent-name }}
set +e # Do not quit on non-zero exit codes
poetry run agbenchmark run -N 3 \
--test=ReadFile \
--test=BasicRetrieval --test=RevenueRetrieval2 \
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
--test=UrlShortener --test=TicTacToe --test=Battleship \
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
--test=WebArenaTask_134 --test=WebArenaTask_163
# Convert exit code 1 (some challenges failed) to exit code 0
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
exit 0
else
exit $?
fi
env:
AGENT_NAME: ${{ matrix.agent-name }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
- name: Push reports to data branch
run: |
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
rm ${{ env.REPORTS_FOLDER }}/*.json
# Find folder with newest (untracked) report in it
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
| xargs -I {} dirname {} \
| xargs -I {} git ls-files --others --exclude-standard {} \
| xargs -I {} dirname {} \
| sort -u)
json_report_file="$report_subfolder/report.json"
# Convert JSON report to Markdown
markdown_report_file="$report_subfolder/report.md"
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
git config --global user.name 'GitHub Actions'
git config --global user.email 'github-actions@agpt.co'
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
&& git checkout ${{ env.REPORTS_BRANCH }} \
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
git reset --hard
git add ${{ env.REPORTS_FOLDER }}
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
&& git push origin ${{ env.REPORTS_BRANCH }}


@@ -1,69 +0,0 @@
name: AutoGPTs smoke test CI
on:
workflow_dispatch:
schedule:
- cron: '0 8 * * *'
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpts/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpts/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'
jobs:
run-tests:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt, forge ]
fail-fast: false
timeout-minutes: 20
env:
min-python-version: '3.10'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./autogpts/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
run: |
./run agent start ${{ matrix.agent-name }}
cd autogpts/${{ matrix.agent-name }}
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
poetry run agbenchmark --test=WriteFile
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENT_NAME: ${{ matrix.agent-name }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
HELICONE_CACHE_ENABLED: false
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
TELEMETRY_ENVIRONMENT: autogpt-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}


@@ -1,141 +0,0 @@
name: Benchmark CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- 'benchmark/**'
- .github/workflows/benchmark-ci.yml
- '!benchmark/reports/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- 'benchmark/**'
- '!benchmark/reports/**'
- .github/workflows/benchmark-ci.yml
env:
min-python-version: '3.10'
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
working-directory: ./benchmark/
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry
working-directory: ./benchmark/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Install dependencies
working-directory: ./benchmark/
run: |
export POETRY_VIRTUALENVS_IN_PROJECT=true
poetry install -vvv
- name: Lint with flake8
working-directory: ./benchmark/
run: poetry run flake8
- name: Check black formatting
working-directory: ./benchmark/
run: poetry run black . --exclude test.py --check
if: success() || failure()
- name: Check isort formatting
working-directory: ./benchmark/
run: poetry run isort . --check
if: success() || failure()
- name: Check for unused imports and pass statements
working-directory: ./benchmark/
run: |
cmd="poetry run autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring agbenchmark"
$cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
if: success() || failure()
tests-agbenchmark:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ forge ]
fail-fast: false
timeout-minutes: 20
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./autogpts/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
run: |
./run agent start ${{ matrix.agent-name }}
cd autogpts/${{ matrix.agent-name }}
set +e # Ignore non-zero exit codes and continue execution
echo "Running the following command: poetry run agbenchmark --maintain --mock"
poetry run agbenchmark --maintain --mock
EXIT_CODE=$?
set -e # Stop ignoring non-zero exit codes
# Check if the exit code was 5, and if so, exit with 0 instead
if [ $EXIT_CODE -eq 5 ]; then
echo "regression_tests.json is empty."
fi
echo "Running the following command: poetry run agbenchmark --mock"
poetry run agbenchmark --mock
echo "Running the following command: poetry run agbenchmark --mock --category=data"
poetry run agbenchmark --mock --category=data
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
cd ../../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true
poetry run agbenchmark --mock
poetry run pytest -vv -s tests
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"
exit 1
else
echo "No unstaged changes."
fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}


@@ -1,55 +0,0 @@
name: Publish to PyPI
on:
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: true
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.8
- name: Install Poetry
working-directory: ./benchmark/
run: |
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.poetry/bin" >> $GITHUB_PATH
- name: Build project for distribution
working-directory: ./benchmark/
run: poetry build
- name: Install dependencies
working-directory: ./benchmark/
run: poetry install
- name: Check Version
working-directory: ./benchmark/
id: check-version
run: |
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
- name: Create Release
uses: ncipollo/release-action@v1
with:
artifacts: "benchmark/dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: false
tag: agbenchmark-v${{ steps.check-version.outputs.version }}
commit: master
- name: Build and publish
working-directory: ./benchmark/
run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}

73  .github/workflows/benchmarks.yml vendored Normal file

@@ -0,0 +1,73 @@
name: Benchmarks
on:
schedule:
- cron: '0 8 * * *'
workflow_dispatch:
jobs:
Benchmark:
name: ${{ matrix.config.task-name }}
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
config:
- python-version: "3.10"
task: "tests/challenges"
task-name: "Mandatory Tasks"
- python-version: "3.10"
task: "--beat-challenges -ra tests/challenges"
task-name: "Challenging Tasks"
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
ref: master
- name: Set up Python ${{ matrix.config.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.config.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run pytest with coverage
run: |
rm -rf tests/Auto-GPT-test-cassettes
pytest -n auto --record-mode=all ${{ matrix.config.task }}
env:
CI: true
PROXY: ${{ secrets.PROXY }}
AGENT_MODE: ${{ secrets.AGENT_MODE }}
AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
PLAIN_OUTPUT: True
- name: Upload logs as artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: test-logs-${{ matrix.config.task-name }}
path: logs/
- name: Upload cassettes as artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: cassettes-${{ matrix.config.task-name }}
path: tests/Auto-GPT-test-cassettes/

261  .github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,261 @@
name: Python CI
on:
push:
branches: [ master, ci-test* ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
pull_request:
branches: [ stable, master, release-* ]
pull_request_target:
branches: [ master, release-*, ci-test* ]
concurrency:
group: ${{ format('ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
jobs:
lint:
# eliminate duplicate runs
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Lint with flake8
run: flake8
- name: Check black formatting
run: black . --check
if: success() || failure()
- name: Check isort formatting
run: isort . --check
if: success() || failure()
- name: Check mypy formatting
run: mypy
if: success() || failure()
- name: Check for unused imports and pass statements
run: |
cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
$cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
test:
# eliminate duplicate runs
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
permissions:
# Gives the action the necessary permissions for publishing new
# comments in pull requests.
pull-requests: write
# Gives the action the necessary permissions for pushing data to the
# python-coverage-comment-action branch, and for editing existing
# comments (to avoid publishing multiple comments in the same PR)
contents: write
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
matrix:
python-version: ["3.10"]
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
submodules: true
- name: Configure git user Auto-GPT-Bot
run: |
git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co"
- name: Checkout cassettes
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
cassette_base_branch="${{ github.event.pull_request.base.ref }}"
cd tests/Auto-GPT-test-cassettes
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi
if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin $cassette_base_branch
git checkout $cassette_branch
# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '$cassette_base_branch'."
fi
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run pytest with coverage
run: |
pytest -vv --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
tests/unit tests/integration tests/challenges
python tests/challenges/utils/build_current_score.py
env:
CI: true
PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}
AGENT_MODE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_MODE || '' }}
AGENT_TYPE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_TYPE || '' }}
OPENAI_API_KEY: ${{ github.event_name != 'pull_request_target' && secrets.OPENAI_API_KEY || '' }}
PLAIN_OUTPUT: True
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
- id: setup_git_auth
name: Set up git token authentication
# Cassettes may be pushed even when tests fail
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
git config "$config_key" \
"Authorization: Basic $base64_pat"
cd tests/Auto-GPT-test-cassettes
git config "$config_key" \
"Authorization: Basic $base64_pat"
echo "config_key=$config_key" >> $GITHUB_OUTPUT
- name: Push updated challenge scores
if: github.event_name == 'push'
run: |
score_file="tests/challenges/current_score.json"
if ! git diff --quiet $score_file; then
git add $score_file
git commit -m "Update challenge scores"
git push origin HEAD:${{ github.ref_name }}
else
echo "The challenge scores didn't change."
fi
- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
if: github.event_name == 'push' || success() || failure()
run: |
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
is_pull_request=true
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
else
cassette_branch="${{ github.ref_name }}"
fi
cd tests/Auto-GPT-test-cassettes
# Commit & push changes to cassettes if any
if ! git diff --quiet; then
git add .
git commit -m "Auto-update cassettes"
git push origin HEAD:$cassette_branch
if [ ! $is_pull_request ]; then
cd ../..
git add tests/Auto-GPT-test-cassettes
git commit -m "Update cassette submodule"
git push origin HEAD:$cassette_branch
fi
echo "updated=true" >> $GITHUB_OUTPUT
else
echo "updated=false" >> $GITHUB_OUTPUT
echo "No cassette changes to commit"
fi
- name: Post Set up git token auth
if: steps.setup_git_auth.outcome == 'success'
run: |
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
- name: Apply "behaviour change" label and comment on PR
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
PR_NUMBER=${{ github.event.pull_request.number }}
TOKEN=${{ secrets.PAT_REVIEW }}
REPO=${{ github.repository }}
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
echo "Adding label and comment..."
curl -X POST \
-H "Authorization: Bearer $TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels \
-d '{"labels":["behaviour change"]}'
echo $TOKEN | gh auth login --with-token
gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: test-logs
path: logs/


@@ -1,34 +0,0 @@
name: 'Close stale issues'
on:
schedule:
- cron: '30 1 * * *'
workflow_dispatch:
permissions:
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
with:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 50 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
not had any activity in the last 50 days. You can _unstale_ it by commenting
or removing the label.
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 50
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management
# Do not affect pull requests:
days-before-pr-stale: -1
days-before-pr-close: -1


@@ -1,11 +1,11 @@
name: Purge Auto-GPT Docker CI cache
name: Purge Docker CI cache
on:
schedule:
- cron: 20 4 * * 1,4
env:
BASE_BRANCH: development
BASE_BRANCH: master
IMAGE_NAME: auto-gpt
jobs:
@@ -16,20 +16,19 @@ jobs:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- id: build
name: Build image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
@@ -38,10 +37,10 @@ jobs:
build_type: ${{ matrix.build-type }}
prod_branch: master
dev_branch: development
prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}

124
.github/workflows/docker-ci.yml vendored Normal file

@@ -0,0 +1,124 @@
name: Docker CI
on:
push:
branches: [ master ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
pull_request:
branches: [ master, release-*, stable ]
concurrency:
group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
env:
IMAGE_NAME: auto-gpt
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- if: runner.debug
run: |
ls -al
du -hs *
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-${{ matrix.build-type }}
cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref_type }}
build_type: ${{ matrix.build-type }}
prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
test:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
submodules: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=dev # include pytest
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-dev
cache-to: type=gha,scope=docker-dev,mode=max
- id: test
name: Run tests
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
test_output=$(
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
--numprocesses=4 --durations=10 \
tests/unit tests/integration 2>&1
)
test_failure=$?
echo "$test_output"
cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$test_output
\`\`\`
$EOF
exit $test_failure


@@ -1,4 +1,4 @@
name: AutoGPT Docker Release
name: Docker Release
on:
release:
@@ -10,45 +10,37 @@ on:
type: boolean
description: 'Build from scratch, without using cached layers'
defaults:
run:
working-directory: autogpts/autogpt
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
jobs:
build:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Log in to Docker hub
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
- id: build
name: Build image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
context: autogpts/autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
@@ -56,11 +48,10 @@ jobs:
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release
cache-to: type=gha,scope=docker-release,mode=max
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
@@ -72,10 +63,10 @@ jobs:
event_ref_type: ${{ github.event.ref_type }}
inputs_no_cache: ${{ inputs.no_cache }}
prod_branch: master
dev_branch: development
prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
@@ -87,5 +78,4 @@ jobs:
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
working-directory: ./
continue-on-error: true


@@ -0,0 +1,37 @@
name: Docs
on:
push:
branches: [ stable ]
paths:
- 'docs/**'
- 'mkdocs.yml'
- '.github/workflows/documentation.yml'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
permissions:
contents: write
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Python 3
uses: actions/setup-python@v4
with:
python-version: 3.x
- name: Set up workflow cache
uses: actions/cache@v3
with:
key: ${{ github.ref }}
path: .cache
- run: pip install mkdocs-material
- run: mkdocs gh-deploy --force


@@ -1,60 +0,0 @@
name: Frontend CI/CD
on:
push:
branches:
- master
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
pull_request:
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
jobs:
build:
permissions:
contents: write
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v6
with:
add-paths: frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"


@@ -1,133 +0,0 @@
name: Hackathon
on:
workflow_dispatch:
inputs:
agents:
description: "Agents to run (comma-separated)"
required: false
default: "autogpt" # Default agents if none are specified
jobs:
matrix-setup:
runs-on: ubuntu-latest
# Service containers to run with `matrix-setup`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
env-name: ${{ steps.set-matrix.outputs.env-name }}
steps:
- id: set-matrix
run: |
if [ "${{ github.event_name }}" == "schedule" ]; then
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::[ 'irrelevant']"
elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::$matrix_string"
else
echo "::set-output name=env-name::testing"
echo "::set-output name=matrix::[ 'irrelevant' ]"
fi
tests:
environment:
name: "${{ needs.matrix-setup.outputs.env-name }}"
needs: matrix-setup
env:
min-python-version: "3.10"
name: "${{ matrix.agent-name }}"
runs-on: ubuntu-latest
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
timeout-minutes: 50
strategy:
fail-fast: false
matrix:
agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
steps:
- name: Print Environment Name
run: |
echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
- name: Check Docker Container
id: check
run: docker ps
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: v18.15
- name: Run benchmark
run: |
link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
git clone "$link" -b "$branch" "$AGENT_NAME"
cd $AGENT_NAME
cp ./autogpts/$AGENT_NAME/.env.example ./autogpts/$AGENT_NAME/.env || echo "file not found"
./run agent start $AGENT_NAME
cd ../benchmark
poetry install
poetry run agbenchmark --no-dep
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
AGENT_NAME: ${{ matrix.agent-name }}


@@ -3,10 +3,10 @@ name: "Pull Request auto-label"
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, development, release-* ]
branches: [ master, release-* ]
paths-ignore:
- 'autogpts/autogpt/tests/vcr_cassettes'
- 'benchmark/reports/**'
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -52,15 +52,6 @@ jobs:
l_label: 'size/l'
l_max_size: 500
xl_label: 'size/xl'
message_if_xl:
scope:
if: ${{ github.event_name == 'pull_request_target' }}
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
with:
sync-labels: true
message_if_xl: >
This PR exceeds the recommended size of 500 lines.
Please make sure you are NOT addressing multiple issues with one PR.


@@ -1,20 +0,0 @@
name: github-repo-stats
on:
schedule:
# Run this once per day, towards the end of the day for keeping the most
# recent data point most meaningful (hours are interpreted in UTC).
- cron: "0 23 * * *"
workflow_dispatch: # Allow for running this manually.
jobs:
j1:
name: github-repo-stats
runs-on: ubuntu-latest
steps:
- name: run-ghrs
# Use latest release.
uses: jgehrcke/github-repo-stats@HEAD
with:
ghtoken: ${{ secrets.ghrs_github_api_token }}

18
.gitignore vendored

@@ -1,5 +1,4 @@
## Original ignores
.github_access_token
autogpt/keys.py
autogpt/*.json
auto_gpt_workspace/*
@@ -29,8 +28,11 @@ __pycache__/
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
/plugins/
plugins_config.yaml
downloads/
eggs/
.eggs/
@@ -164,16 +166,4 @@ agbenchmark/reports/
# Nodejs
package-lock.json
package.json
# Allow for locally private items
# private
pri*
# ignore
ig*
.github_access_token
arena/TestAgent.json
# evo.ninja
autogpts/evo.ninja/*
!autogpts/evo.ninja/setup
package.json

7
.gitmodules vendored

@@ -1,3 +1,4 @@
[submodule "autogpts/autogpt/tests/vcr_cassettes"]
path = autogpts/autogpt/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "tests/Auto-GPT-test-cassettes"]
path = tests/Auto-GPT-test-cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
branch = master

10
.isort.cfg Normal file

@@ -0,0 +1,10 @@
[settings]
profile = black
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
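For illustration, imports sorted under this configuration come out grouped in the order given by `sections`, one block per group (the module names below are made up for the example):

```python
from __future__ import annotations  # FUTURE

import os  # STDLIB
import sys

import requests  # THIRDPARTY

from autogpt.config import Config  # FIRSTPARTY

from .helpers import format_output  # LOCALFOLDER (valid inside a package)
```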


@@ -1,6 +0,0 @@
[pr_reviewer]
num_code_suggestions=0
[pr_code_suggestions]
commitable_code_suggestions=false
num_code_suggestions=0


@@ -31,6 +31,12 @@ repos:
hooks:
- id: autoflake
name: autoflake
entry: autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring --in-place agbenchmark
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
language: python
types: [ python ]
- id: pytest-check
name: pytest-check
entry: pytest --cov=autogpt tests/unit
language: system
pass_filenames: false
always_run: true

21
BULLETIN.md Normal file

@@ -0,0 +1,21 @@
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
# v0.4.7 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
This release introduces initial REST API support, powered by e2b's agent
protocol SDK (https://github.com/e2b-dev/agent-protocol#sdk).
It also includes improvements to prompt generation and support
for our new benchmarking tool, Auto-GPT-Benchmarks
(https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks).
We've also moved our documentation to Material Theme, at https://docs.agpt.co.
As usual, we've squashed a few bugs and made some under-the-hood improvements.
Take a look at the Release Notes on Github for the full changelog:
https://github.com/Significant-Gravitas/Auto-GPT/releases.


@@ -1,21 +0,0 @@
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!
cff-version: 1.2.0
title: AutoGPT
message: >-
If you use this software, please cite it using the
metadata from this file.
type: software
authors:
- name: Significant Gravitas
website: 'https://agpt.co'
repository-code: 'https://github.com/Significant-Gravitas/AutoGPT'
url: 'https://agpt.co'
abstract: >-
A collection of tools and experimental open-source attempts to make GPT-4 fully
autonomous.
keywords:
- AI
- Agent
license: MIT


@@ -1,182 +0,0 @@
## CLI Documentation
This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agent stop` command will terminate any process running on port 8000.
### 1. Entry Point for the CLI
Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.
```sh
./run
```
**Output**:
```
Usage: cli.py [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
Commands:
agent Commands to create, start and stop agents
benchmark Commands to start the benchmark and list tests and categories
setup Installs dependencies needed for your system.
```
If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:
```sh
./run COMMAND --help
```
This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.
### 2. Setup Command
```sh
./run setup
```
**Output**:
```
Setup initiated
Installation has been completed.
```
This command initializes the setup of the project.
### 3. Agents Commands
**a. List All Agents**
```sh
./run agent list
```
**Output**:
```
Available agents: 🤖
🐙 forge
🐙 autogpt
```
Lists all the available agents.
**b. Create a New Agent**
```sh
./run agent create my_agent
```
**Output**:
```
🎉 New agent 'my_agent' created and switched to the new directory in autogpts folder.
```
Creates a new agent named 'my_agent'.
**c. Start an Agent**
```sh
./run agent start my_agent
```
**Output**:
```
... (ASCII Art representing the agent startup)
[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
```
Starts the 'my_agent' and displays startup ASCII art and logs.
**d. Stop an Agent**
```sh
./run agent stop
```
**Output**:
```
Agent stopped
```
Stops the running agent.
### 4. Benchmark Commands
**a. List Benchmark Categories**
```sh
./run benchmark categories list
```
**Output**:
```
Available categories: 📚
📖 code
📖 safety
📖 memory
... (and so on)
```
Lists all available benchmark categories.
**b. List Benchmark Tests**
```sh
./run benchmark tests list
```
**Output**:
```
Available tests: 📚
📖 interface
🔬 Search - TestSearch
🔬 Write File - TestWriteFile
... (and so on)
```
Lists all available benchmark tests.
**c. Show Details of a Benchmark Test**
```sh
./run benchmark tests details TestWriteFile
```
**Output**:
```
TestWriteFile
-------------
Category: interface
Task: Write the word 'Washington' to a .txt file
... (and other details)
```
Displays the details of the 'TestWriteFile' benchmark test.
**d. Start Benchmark for the Agent**
```sh
./run benchmark start my_agent
```
**Output**:
```
(more details about the testing process are shown while the tests are running)
============= 13 failed, 1 passed in 0.97s ============...
```
Displays the results of the benchmark tests on 'my_agent'.


@@ -1,12 +1,12 @@
# Code of Conduct for AutoGPT
# Code of Conduct for Auto-GPT
## 1. Purpose
The purpose of this Code of Conduct is to provide guidelines for contributors to the AutoGPT projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
## 2. Scope
This Code of Conduct applies to all contributors, maintainers, and users of the AutoGPT project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
## 3. Our Standards
@@ -36,5 +36,4 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont
## 6. Contact
If you have any questions or concerns, please contact the project maintainers on Discord:
https://discord.gg/autogpt
If you have any questions or concerns, please contact the project maintainers.


@@ -1,34 +1,14 @@
# AutoGPT Contribution Guide
If you are reading this, you are probably looking for our **[contribution guide]**,
which is part of our [knowledge base].
We maintain a knowledgebase at this [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)
You can find our immediate priorities and their progress on our public [kanban board].
We would like to say "We value all contributions". After all, we are an open-source project, so we should say something fluffy like this, right?
[contribution guide]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
[knowledge base]: https://github.com/Significant-Gravitas/Nexus/wiki
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
However, the reality is that some contributions are SUPER-valuable, while others create more trouble than they are worth and actually _create_ work for the core team.
## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger
[todo's][kanban board]!
* We highly recommend to post your idea and discuss it in the [dev channel].
3. Please also consider contributing something other than code; see the
[contribution guide] for options.
4. Create a draft PR when starting work on bigger changes.
5. Clearly explain your changes when submitting a PR.
6. Don't submit stuff that's broken.
7. Avoid making unnecessary changes, especially if they're purely based on your personal
preferences. Doing so is the maintainers' job. ;-)
If you wish to contribute, please look through the wiki [contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) page.
[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
If you wish to get involved with the project (beyond just contributing PRs), please read the wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.
If you wish to get involved with the project (beyond just contributing PRs), please read the
wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.
In fact, why not just look through the whole wiki (it's only a few pages) and hop on our discord (you'll find it in the wiki).
In fact, why not just look through the whole wiki (it's only a few pages) and
hop on our Discord. See you there! :-)
❤️ & 🔆
The team @ AutoGPT
https://discord.gg/autogpt
❤️ & 🔆
The team @ Auto-GPT


@@ -6,7 +6,7 @@ FROM python:3.10-slim AS autogpt-base
# Install browsers
RUN apt-get update && apt-get install -y \
chromium-driver ca-certificates gcc \
chromium-driver firefox-esr ca-certificates \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# Install utilities
@@ -17,40 +17,30 @@ RUN apt-get update && apt-get install -y \
# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
POETRY_HOME="/opt/poetry" \
POETRY_VIRTUALENVS_PATH="/venv" \
POETRY_VIRTUALENVS_IN_PROJECT=0 \
POETRY_NO_INTERACTION=1
PYTHONDONTWRITEBYTECODE=1
# Install and configure Poetry
RUN curl -sSL https://install.python-poetry.org | python3 -
ENV PATH="$POETRY_HOME/bin:$PATH"
RUN poetry config installer.max-workers 10
WORKDIR /app
COPY pyproject.toml poetry.lock ./
# Install the required python packages globally
ENV PATH="$PATH:/root/.local/bin"
COPY requirements.txt .
# Set the entrypoint
ENTRYPOINT ["poetry", "run", "autogpt"]
CMD []
ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]
# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN poetry install --no-cache --no-root \
&& rm -rf $(poetry env info --path)/src
RUN pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY . ./
# release build -> include bare minimum
FROM autogpt-base as autogpt-release
RUN poetry install --no-cache --no-root --without dev \
&& rm -rf $(poetry env info --path)/src
RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins
ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
ONBUILD COPY README.md ./README.md
ONBUILD RUN mkdir ./data
FROM autogpt-${BUILD_TYPE} AS autogpt
RUN poetry install --only-root
FROM autogpt-${BUILD_TYPE} AS auto-gpt


@@ -1,200 +0,0 @@
# Quickstart Guide
> For the complete getting started tutorial series, click [here](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec).
Welcome to the Quickstart Guide! This guide will walk you through the process of setting up and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the necessary steps to jumpstart your journey in the world of AI development with AutoGPT.
## System Requirements
This project supports Linux (Debian based), Mac, and Windows Subsystem for Linux (WSL). If you are using a Windows system, you will need to install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
## Getting Setup
1. **Fork the Repository**
To fork the repository, follow these steps:
- Navigate to the main page of the repository.
![Repository](docs/content/imgs/quickstart/001_repo.png)
- In the top-right corner of the page, click Fork.
![Create Fork UI](docs/content/imgs/quickstart/002_fork.png)
- On the next page, select your GitHub account to create the fork under.
- Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
2. **Clone the Repository**
To clone the repository, you need to have Git installed on your system. If you don't have Git installed, you can download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
- Open your terminal.
- Navigate to the directory where you want to clone the repository.
- Run the `git clone` command for the fork you just created.
![Clone the Repository](docs/content/imgs/quickstart/003_clone.png)
- Then open the project in your IDE.
![Open the Project in your IDE](docs/content/imgs/quickstart/004_ide.png)
3. **Set up the Project**
Next, we need to set up the required dependencies. The repo ships with a helper tool for all the tasks you need to perform on it.
It can be accessed by typing `./run` in the terminal.
The first command you need is `./run setup`. This will guide you through the process of setting up your system.
Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token, as in the following image:
> Note for advanced users: the GitHub access token is only needed for the `./run arena enter` command, so that the system can automatically create a PR.
![Setup the Project](docs/content/imgs/quickstart/005_setup.png)
### For Windows Users
If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
#### Update WSL
Run the following command in Powershell or Command Prompt to:
1. Enable the optional WSL and Virtual Machine Platform components.
2. Download and install the latest Linux kernel.
3. Set WSL 2 as the default.
4. Download and install the Ubuntu Linux distribution (a reboot may be required).
```shell
wsl --install
```
For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
#### Resolve FileNotFoundError or "No such file or directory" Errors
When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).
To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here's how to install and run `dos2unix` on the script:
```shell
sudo apt update
sudo apt install dos2unix
dos2unix ./run
```
After executing the above commands, running `./run setup` should work successfully.
#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment.
You can keep re-running the command to get feedback on how far along your setup is.
When setup is complete, the command will return output like this:
![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png)
## Creating Your Agent
After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with a name of your choosing.
Tips for naming your agent:
* Give it its own unique name, or name it after yourself
* Include an important aspect of your agent in the name, such as its purpose
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `Narvis`, `evo.ninja`
![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
### Optional: Entering the Arena
Entering the Arena is an optional step intended for those who wish to actively participate in the agent leaderboard. If you decide to participate, you can enter the Arena by running `./run arena enter YOUR_AGENT_NAME`. This step is not mandatory for the development or testing of your agent.
Entries with names like `agent`, `ExampleAgent`, `test_agent` or `MyExampleGPT` will NOT be merged. We also don't accept copycat entries that use the name of other projects, like `AutoGPT` or `evo.ninja`.
![Enter the Arena](docs/content/imgs/quickstart/008_enter_arena.png)
> **Note**
> For advanced users: create a new branch and add a file called YOUR_AGENT_NAME.json in the arena directory. Then commit this and create a PR to merge into the main repo. Only single-file entries will be permitted. The JSON file needs the following format (a small validation sketch follows this note):
> ```json
> {
> "github_repo_url": "https://github.com/Swiftyos/YourAgentName",
> "timestamp": "2023-09-18T10:03:38.051498",
> "commit_hash_to_benchmark": "ac36f7bfc7f23ad8800339fa55943c1405d80d5e",
> "branch_to_benchmark": "master"
> }
> ```
> - `github_repo_url`: the url to your fork
> - `timestamp`: timestamp of the last update of this file
> - `commit_hash_to_benchmark`: the commit hash of your entry. Update it each time you have something ready to be officially entered into the hackathon.
> - `branch_to_benchmark`: the branch you are using to develop your agent on, default is master.
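Before opening the PR, it is worth sanity-checking your entry file. A few lines of Python suffice (a sketch; the required keys are taken from the format above, and the path is an example):

```python
import json
from pathlib import Path

REQUIRED_KEYS = {
    "github_repo_url",
    "timestamp",
    "commit_hash_to_benchmark",
    "branch_to_benchmark",
}

def check_arena_entry(path: str) -> None:
    """Fail loudly if an arena entry is missing any required key."""
    entry = json.loads(Path(path).read_text())
    missing = REQUIRED_KEYS - entry.keys()
    if missing:
        raise ValueError(f"{path}: missing keys {sorted(missing)}")
    print(f"{path}: OK")

check_arena_entry("arena/YourAgentName.json")  # example path from above
```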
## Running your Agent
Your agent can be started using `./run agent start YOUR_AGENT_NAME`.
This starts the agent on `http://localhost:8000/`.
![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png)
The frontend can be accessed from `http://localhost:8000/`; you will first need to log in using either a Google account or your GitHub account.
![Login](docs/content/imgs/quickstart/010_login.png)
Upon logging in, you will get a page that looks something like this, with your task history down the left-hand side of the page and the 'chat' window for sending tasks to your agent.
![Login](docs/content/imgs/quickstart/011_home.png)
When you have finished with your agent, or if you just need to restart it, use Ctrl-C to end the session; then you can re-run the start command.
If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000; that should be the agent.
## Benchmarking your Agent
The benchmarking system can also be accessed using the CLI:
```bash
agpt % ./run benchmark
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
Commands to start the benchmark and list tests and categories
Options:
--help Show this message and exit.
Commands:
categories Benchmark categories group command
start Starts the benchmark command
tests Benchmark tests group command
agpt % ./run benchmark categories
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
Benchmark categories group command
Options:
--help Show this message and exit.
Commands:
list List benchmark categories command
agpt % ./run benchmark tests
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
Benchmark tests group command
Options:
--help Show this message and exit.
Commands:
details Benchmark test details command
list List benchmark tests command
```
The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
```bash
./run benchmark categories list
# And what tests are available with
./run benchmark tests list
```
![Login](docs/content/imgs/quickstart/012_tests.png)
Finally, you can run the benchmark with
```bash
./run benchmark start YOUR_AGENT_NAME
```

212
README.md

File diff suppressed because one or more lines are too long


@@ -1,66 +0,0 @@
# Security Policy
- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
- [Restrict Workspace](#restrict-workspace)
- [Untrusted inputs](#untrusted-inputs)
- [Data privacy](#data-privacy)
- [Untrusted environments or networks](#untrusted-environments-or-networks)
- [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)
## Using AutoGPT Securely
### Restrict Workspace
Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.
Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks.
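In code, the guard implied by RESTRICT_TO_WORKSPACE amounts to a path-containment check. A minimal sketch (the function and variable names here are illustrative, not AutoGPT's actual API):

```python
from pathlib import Path

def resolve_in_workspace(workspace_root: Path, user_path: str) -> Path:
    """Resolve a user-supplied path, refusing anything outside the workspace."""
    root = workspace_root.resolve()
    candidate = (root / user_path).resolve()
    # resolve() collapses `..` segments, so traversal escapes are caught here
    if not candidate.is_relative_to(root):  # Python 3.9+
        raise PermissionError(f"{user_path!r} escapes the workspace")
    return candidate

# resolve_in_workspace(Path("auto_gpt_workspace"), "../.ssh/id_rsa") -> PermissionError
```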
### Untrusted inputs
When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.
For maximum security when handling untrusted inputs, you may need to employ the following (a small validation sketch follows this list):
* Sandboxing: Isolate the process.
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
* Validation: Enforce strict rules on allowed characters and data types.
* Filtering: Remove potentially malicious scripts or code fragments.
* Encoding: Convert special characters into safe representations.
* Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).
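To make the validation bullet concrete, here is one minimal way to do it: an allow-list check that rejects rather than repairs (the character class and length limit are arbitrary choices for the sketch):

```python
import re

# Arbitrary allow-list: word characters, whitespace, and basic punctuation.
ALLOWED = re.compile(r"[\w\s.,:;!?@/()'-]{1,2000}")

def validate_input(user_input: str) -> str:
    """Reject input containing characters outside the allow-list."""
    if not ALLOWED.fullmatch(user_input):
        raise ValueError("input contains disallowed characters or is too long")
    return user_input
```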
### Data privacy
To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.
### Untrusted environments or networks
Since AutoGPT performs network calls to the OpenAI API, it is important to always run it with trusted environments and networks. Running it on untrusted environments can expose your API KEY to attackers.
Additionally, running it on an untrusted network can expose your data to potential network attacks.
However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.
### Multi-Tenant environments
If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.
The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.
- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important, because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant's identity.
- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.
- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.
- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.
## Reporting a Vulnerability
Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities of AutoGPT.
However, if you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).
A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.


@@ -1,23 +0,0 @@
This page is a list of issues you could encounter along with their fixes.
# Forge
**Poetry configuration invalid**
The poetry configuration is invalid:
- Additional properties are not allowed ('group' was unexpected)
<img width="487" alt="Screenshot 2023-09-22 at 5 42 59 PM" src="https://github.com/Significant-Gravitas/AutoGPT/assets/9652976/dd451e6b-8114-44de-9928-075f5f06d661">
*Solution*
Update Poetry.
**Pydantic Validation Error**
Remove your sqlite `agent.db` file. This most likely happens because some of your data does not comply with the new spec (we will create migrations soon to avoid this problem).
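If you prefer to do the `agent.db` cleanup from Python rather than the shell, a one-liner does it (the path is assumed to be relative to the agent's working directory):

```python
from pathlib import Path

# Delete the stale agent database; the SDK recreates it on next start.
Path("agent.db").unlink(missing_ok=True)
```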
# Benchmark
TODO
# Frontend
TODO

54
agbenchmark/benchmarks.py Normal file

@@ -0,0 +1,54 @@
import sys
from pathlib import Path
from autogpt.agents import Agent
from autogpt.app.main import run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
PROJECT_DIR = Path().resolve()
def run_specific_agent(task, continuous_mode=False) -> None:
agent = bootstrap_agent(task, continuous_mode)
run_interaction_loop(agent)
def bootstrap_agent(task, continuous_mode) -> Agent:
config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
config.debug_mode = True
config.continuous_mode = continuous_mode
config.temperature = 0
config.plain_output = True
command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
config.memory_backend = "no_memory"
config.workspace_path = Workspace.init_workspace_directory(config)
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
ai_config = AIConfig(
ai_name="Auto-GPT",
ai_role="a multi-purpose AI assistant.",
ai_goals=[task],
)
ai_config.command_registry = command_registry
return Agent(
memory=get_memory(config),
command_registry=command_registry,
ai_config=ai_config,
config=config,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
)
if __name__ == "__main__":
# The first argument is the script name itself, second is the task
if len(sys.argv) != 2:
print("Usage: python script.py <task>")
sys.exit(1)
task = sys.argv[1]
run_specific_agent(task, continuous_mode=True)

4
agbenchmark/config.json Normal file

@@ -0,0 +1,4 @@
{
"workspace": "auto_gpt_workspace",
"entry_path": "agbenchmark.benchmarks"
}
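Since `entry_path` is a dotted module path, a harness could load and invoke it roughly like this (a hypothetical sketch of the mechanism, not agbenchmark's actual loader):

```python
import importlib
import json

with open("agbenchmark/config.json") as f:
    config = json.load(f)

# Import "agbenchmark.benchmarks" and call the entrypoint it defines above.
benchmarks = importlib.import_module(config["entry_path"])
benchmarks.run_specific_agent("Write the word 'Washington' to a .txt file",
                              continuous_mode=True)
```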


@@ -0,0 +1,24 @@
{
"TestBasicCodeGeneration": {
"difficulty": "basic",
"dependencies": [
"TestWriteFile"
],
"data_path": "agbenchmark/challenges/code/d3"
},
"TestBasicMemory": {
"difficulty": "basic",
"data_path": "agbenchmark/challenges/memory/m1"
},
"TestReadFile": {
"difficulty": "basic",
"dependencies": [
"TestWriteFile"
],
"data_path": "agbenchmark/challenges/interface/read_file"
},
"TestWriteFile": {
"dependencies": [],
"data_path": "agbenchmark/challenges/interface/write_file"
}
}
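The `dependencies` lists define a partial order: `TestReadFile` should only run after `TestWriteFile` passes, and so on. A minimal sketch of deriving a dependency-respecting run order with a topological sort (not agbenchmark's actual scheduler; the file path is an example):

```python
import json
from graphlib import TopologicalSorter  # stdlib, Python 3.9+

with open("challenges.json") as f:  # example path to the mapping above
    challenges = json.load(f)

# Map each test to its prerequisites, then emit an order where every
# test appears after all of its dependencies.
graph = {name: spec.get("dependencies", []) for name, spec in challenges.items()}
print(list(TopologicalSorter(graph).static_order()))
# e.g. ['TestWriteFile', 'TestBasicMemory', 'TestReadFile', 'TestBasicCodeGeneration']
```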


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/480/AutoGPT",
"timestamp": "2023-10-22T06:49:52.536177",
"commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
"timestamp": "2023-11-01T17:13:24.272333",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
"timestamp": "2023-11-04T10:13:11.039444",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/QingquanBao/AutoGPT",
"timestamp": "2023-11-01T16:20:51.086235",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}


@@ -1,7 +0,0 @@
{
"github_repo_url": "https://github.com/imakb/AKBAgent",
"timestamp": "2023-10-31T00:03:23.000000",
"commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
"branch_to_benchmark": "AKBAgent"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/hongzzz/AutoGPT",
"timestamp": "2023-10-13T03:22:59.347424",
"commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/kaiomagalhaes/AutoGPT",
"timestamp": "2023-10-04T15:25:30.458687",
"commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/Jonobinsoftware/AutoGPT-Tutorial",
"timestamp": "2023-10-10T06:01:23.439061",
"commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/aivaras-mazylis/AutoGPT",
"timestamp": "2023-10-17T13:16:16.327237",
"commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/althaf004/AutoGPT",
"timestamp": "2023-09-26T03:40:03.658369",
"commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/againeureka/AutoGPT",
"timestamp": "2023-10-12T02:20:01.005361",
"commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/kitdesai/AgentKD",
"timestamp": "2023-10-14T02:35:09.979434",
"commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/JawadAbu/AutoGPT.git",
"timestamp": "2023-11-05T12:35:35.352028",
"commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/Shadowless422/Alfred",
"timestamp": "2023-10-03T10:42:45.473477",
"commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/alphaciso/AutoGPT",
"timestamp": "2023-10-21T08:26:41.961187",
"commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/4nd3rs/AutoGPT",
"timestamp": "2023-10-11T11:00:08.150159",
"commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
"branch_to_benchmark": "master"
}


@@ -1 +0,0 @@
{"github_repo_url": "https://github.com/pjw1/AntlerAI", "timestamp": "2023-10-07T11:46:39Z", "commit_hash_to_benchmark": "f81e086e5647370854ec639c531c900775a99207", "branch_to_benchmark": "master"}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/Nimit3-droid/AutoGPT",
"timestamp": "2023-10-03T11:59:15.495902",
"commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
"branch_to_benchmark": "master"
}


@@ -1 +0,0 @@
{"github_repo_url": "https://github.com/somnistudio/SomniGPT", "timestamp": "2023-10-06T16:40:14Z", "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", "branch_to_benchmark": "master"}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/AmahAjavon/AutoGPT",
"timestamp": "2023-10-28T20:32:15.845741",
"commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/arunqa/AutoGPT",
"timestamp": "2023-09-26T05:13:24.466017",
"commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/Nikhil8652/AutoGPT",
"timestamp": "2023-10-16T09:12:17.452121",
"commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/RedTachyon/AutoGPT",
"timestamp": "2023-10-21T22:31:30.871023",
"commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23",
"branch_to_benchmark": "master"
}


@@ -1 +0,0 @@
{"github_repo_url": "https://github.com/SarahGrevy/AutoGPT", "timestamp": "2023-10-20T17:21:22Z", "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", "branch_to_benchmark": "master"}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/JasonDRZ/AutoGPT",
"timestamp": "2023-10-26T13:27:58.805270",
"commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/vshneer/AutoTDD",
"timestamp": "2023-10-11T19:14:30.939747",
"commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/cagdasbas/AutoGPT",
"timestamp": "2023-10-15T08:43:40.193080",
"commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/LuisLechugaRuiz/AwareAgent",
"timestamp": "2023-10-26T10:10:01.481205",
"commit_hash_to_benchmark": "c180063dde49af02ed95ec4c019611da0a5540d7",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git",
"timestamp": "2023-10-20T09:21:48.837635",
"commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/aniruddha-adhikary/AutoGPT",
"timestamp": "2023-09-27T15:32:24.056105",
"commit_hash_to_benchmark": "6f289e6dfa8246f8993b76c933527f3707b8d7e5",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/Baptistecaille/AutoGPT",
"timestamp": "2023-10-01T19:44:23.416591",
"commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba",
"branch_to_benchmark": "master"
}


@@ -1 +0,0 @@
{"github_repo_url": "https://github.com/jafar-albadarneh/Bravo06GPT", "timestamp": "2023-10-04T23:01:27Z", "commit_hash_to_benchmark": "f8c177b4b0e4ca45a3a104011b866c0415c648f1", "branch_to_benchmark": "master"}


@@ -1 +0,0 @@
{"github_repo_url": "https://github.com/dabeer021/Brillante-AI", "timestamp": "2023-10-02T19:05:04Z", "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", "branch_to_benchmark": "master"}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/razorhasbeen/AutoGPT",
"timestamp": "2023-10-03T11:50:56.725628",
"commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/ccsnow127/AutoGPT",
"timestamp": "2023-10-21T13:57:15.131761",
"commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/ces-sonnguyen/CES-GPT",
"timestamp": "2023-10-30T07:45:07.337258",
"commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/cislerk/AutoGPT",
"timestamp": "2023-10-10T18:40:50.718850",
"commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/myncow/DocumentAgent.git",
"timestamp": "2023-10-31T21:21:28.951345",
"commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/dr1yl/AutoGPT",
"timestamp": "2023-10-09T20:01:05.041446",
"commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
"branch_to_benchmark": "master"
}


@@ -1 +0,0 @@
{"github_repo_url": "https://github.com/Ahmad-Alaziz/ChadGPT", "timestamp": "2023-10-26T09:39:35Z", "commit_hash_to_benchmark": "84dd029c011379791a6fec8b148b2982a2ef159e", "branch_to_benchmark": "master"}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT",
"timestamp": "2023-10-31T17:55:41.458834",
"commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/hugomastromauro/AutoGPT",
"timestamp": "2023-11-01T13:21:42.624202",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}


@@ -1 +0,0 @@
{"github_repo_url": "https://github.com/simonfunk/Auto-GPT", "timestamp": "2023-10-08T02:10:18Z", "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", "branch_to_benchmark": "master"}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/HMDCrew/AutoGPT",
"timestamp": "2023-10-06T20:41:26.293944",
"commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/wic0144/AutoGPT",
"timestamp": "2023-10-26T09:05:21.013962",
"commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/beisdog/AutoGPT",
"timestamp": "2023-09-29T22:06:18.846082",
"commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce",
"branch_to_benchmark": "master"
}


@@ -1,6 +0,0 @@
{
"github_repo_url": "https://github.com/schumacher-m/Derpmaster",
"timestamp": "2023-10-30T21:10:27.407732",
"commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7",
"branch_to_benchmark": "master"
}

Some files were not shown because too many files have changed in this diff.