Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-13 17:18:08 -05:00)

Compare commits: self-feedb...v0.3.1
94 Commits
Commits in this range, by short SHA1:

3d494f1032, dda8d0f6bf, f778483ac3, 15ebe23bc2, bc6f8a27ff, 2d9b9294d0, de6b8ee9f2, a110ff94a5, 4143d212a5, e6f8e51504, b958386689, 2f7beebc61, 900de5fe63, 233f900fa6, 7a34d49264, 6c78d80d37, 12e806d754, 21a202b655, 79fba4ab7b, c771e1fd50, a60512cdae, b06ea616d9, 2513178980, 8f3119621c, 5989c14577, 980bbe2bc3, 23e1e1ed53, 0166eacb2b, 33a3e6f998, d1327fd1c2, baa7873ec1, dc959596fc, 08bc8ff3f7, ca5abff93f, d184d0d235, 06317dfb2b, d57af05f66, 7eddfacd10, 3df88be6cb, b496cdcfb2, 6d4bea3bb6, e12438de41, f2bef76368, ea08050049, 7d234522b7, d9170cab22, ad8b8cb9eb, fad24b3525, b0163230a9, cb97f5c101, a48f26c150, 911cea781f, d2a9e54dfb, d74428057e, e21917cc93, 26c6cfeefd, 479c7468b4, b0c6ed999c, b5f95fd672, d8f5cdbb50, 6e5ddeb015, 725abbb662, e4129e1a3a, dbd68df40c, 3a80e2f399, 0e1c0c55f8, 2e9c80a486, 1d26f6b697, 4767fe63d3, 0ef6f06462, a5f856328d, 7fc6f2abfc, 94ec4a4ea5, 9c56b1beef, 34261a1583, d8968ae899, 6ae90a3ea2, c317cf0e75, c1329c92fd, abd6115aea, 6d2c0c4242, aab79fdf6d, dd96d98fa1, 064ac5c742, 06ae4684c8, 6997bb0bdd, cdd91f7ea3, 4f72ee7815, 095883ca54, f77c3604ce, 2d058feaf8, 9c6494aca7, 91537b0496, 999990b614
.env.template

@@ -18,13 +18,40 @@
## EXIT_KEY - Key to exit AUTO-GPT
# EXIT_KEY=n

## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled. Each of the below are an option:
## autogpt.commands.analyze_code
## autogpt.commands.audio_text
## autogpt.commands.execute_code
## autogpt.commands.file_operations
## autogpt.commands.git_operations
## autogpt.commands.google_search
## autogpt.commands.image_gen
## autogpt.commands.improve_code
## autogpt.commands.twitter
## autogpt.commands.web_selenium
## autogpt.commands.write_tests
## autogpt.app
## autogpt.commands.task_statuses
## For example, to disable coding related features, uncomment the next line
# DISABLED_COMMAND_CATEGORIES=autogpt.commands.analyze_code,autogpt.commands.execute_code,autogpt.commands.git_operations,autogpt.commands.improve_code,autogpt.commands.write_tests

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)

## NOTE: https://platform.openai.com/docs/api-reference/completions
# The temperature setting in language models like GPT controls the balance between predictable and random responses.
# Lower temperature makes the responses more focused and deterministic, while higher temperature makes them more
# creative and varied. The temperature range typically goes from 0 to 2 in OpenAI's implementation.
##
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
##
###

## USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
# TEMPERATURE=0
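To make the temperature knob concrete, here is a minimal sketch of reading a TEMPERATURE value from the environment and passing it to a chat completion. It uses the classic (pre-1.0) `openai` Python SDK that Auto-GPT targeted at the time; the model name and prompt are placeholders, not taken from the diff.

```python
import os

import openai  # classic (pre-1.0) SDK interface

openai.api_key = os.environ["OPENAI_API_KEY"]

# Default of 0 keeps responses focused and deterministic, as described above.
temperature = float(os.getenv("TEMPERATURE", "0"))

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",  # placeholder model
    messages=[{"role": "user", "content": "Name three uses for an autonomous agent."}],
    temperature=temperature,  # 0 (deterministic) up to 2 (highly varied)
)
print(response["choices"][0]["message"]["content"])
```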
@@ -49,6 +76,14 @@ OPENAI_API_KEY=your-openai-api-key
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000

### EMBEDDINGS
## EMBEDDING_MODEL - Model to use for creating embeddings
## EMBEDDING_TOKENIZER - Tokenizer to use for chunking large inputs
## EMBEDDING_TOKEN_LIMIT - Chunk size limit for large inputs
# EMBEDDING_MODEL=text-embedding-ada-002
# EMBEDDING_TOKENIZER=cl100k_base
# EMBEDDING_TOKEN_LIMIT=8191

################################################################################
### MEMORY
################################################################################
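The three embedding settings above pair a model with a tokenizer and a chunk-size limit. A minimal sketch of what token-based chunking could look like with `tiktoken`; the constants mirror the env-var defaults, but the helper itself is illustrative, not Auto-GPT's code.

```python
import tiktoken

EMBEDDING_TOKENIZER = "cl100k_base"  # tokenizer used by text-embedding-ada-002
EMBEDDING_TOKEN_LIMIT = 8191         # max tokens the embedding model accepts


def chunk_by_tokens(text: str, limit: int = EMBEDDING_TOKEN_LIMIT) -> list[str]:
    """Split text into chunks of at most `limit` tokens each."""
    encoding = tiktoken.get_encoding(EMBEDDING_TOKENIZER)
    tokens = encoding.encode(text)
    return [
        encoding.decode(tokens[i : i + limit])
        for i in range(0, len(tokens), limit)
    ]
```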
@@ -157,7 +192,7 @@ OPENAI_API_KEY=your-openai-api-key
### BROWSER
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
## Note: set this to either 'chrome', 'firefox', 'safari' or 'edge' depending on your current browser
# HEADLESS_BROWSER=True
# USE_WEB_BROWSER=chrome
## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 75 % of FAST_TOKEN_LIMIT is usually wise )
@@ -205,7 +240,9 @@ OPENAI_API_KEY=your-openai-api-key
################################################################################

#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
#DENYLISTED_PLUGINS - Sets the listed plugins that are not allowed (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=
DENYLISTED_PLUGINS=

################################################################################
### CHAT PLUGIN SETTINGS
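DISABLED_COMMAND_CATEGORIES and the plugin allow/deny lists are all plain comma-separated strings. A sketch of how such values can be parsed and consulted — illustrative only, not the repository's actual Config code:

```python
import os


def env_list(name: str) -> list[str]:
    """Parse a comma-separated environment variable into a clean list."""
    raw = os.getenv(name, "")
    return [item.strip() for item in raw.split(",") if item.strip()]


disabled_categories = env_list("DISABLED_COMMAND_CATEGORIES")
allowlisted_plugins = env_list("ALLOWLISTED_PLUGINS")

# e.g. skip registering any command module whose category is disabled:
if "autogpt.commands.execute_code" in disabled_categories:
    print("Code execution commands are disabled.")
```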
.github/CODEOWNERS (vendored, new file, 1 line)

@@ -0,0 +1 @@
.github/workflows/ @Significant-Gravitas/Auto-GPT-Source
.github/ISSUE_TEMPLATE/1.bug.yml (vendored, 2 changes)

@@ -9,6 +9,7 @@ body:
        * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
        * If you need help, you can ask in the [discussions] section or in [#tech-support]
        * **Throughly search the [existing issues] before creating a new one**
        * Read our [wiki page on Contributing]

        [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
        [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
@@ -16,6 +17,7 @@ body:
        [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
        [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
        [existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
        [wiki page on Contributing]: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
  - type: checkboxes
    attributes:
      label: ⚠️ Search for existing issues first ⚠️
.github/ISSUE_TEMPLATE/2.feature.yml (vendored, 7 changes)

@@ -1,13 +1,12 @@
name: Feature request 🚀
description: Suggest a new idea for Auto-GPT.
description: Suggest a new idea for Auto-GPT!
labels: ['status: needs triage']
body:
  - type: markdown
    attributes:
      value: |
        First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing)
        Please provide a searchable summary of the issue in the title above ⬆️.

        Thanks for contributing by creating an issue! ❤️
  - type: checkboxes
    attributes:
      label: Duplicates
@@ -26,4 +25,4 @@ body:
  - type: textarea
    attributes:
      label: Motivation 🔦
      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changes)

@@ -14,6 +14,8 @@ Provide clear documentation and explanations of the changes made.
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg

Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing)

By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->

### Background
.github/workflows/add-cassettes.yml (vendored, new file, 49 lines)

@@ -0,0 +1,49 @@
name: Merge and Commit Cassettes

on:
  pull_request_target:
    types:
      - closed

jobs:
  update-cassettes:
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # This is necessary to fetch all branches and tags

      - name: Fetch all branches
        run: git fetch --all

      - name: Reset branch
        run: |
          git checkout ${{ github.event.pull_request.base.ref }}
          git reset --hard origin/cassette-diff-${{ github.event.pull_request.number }}

      - name: Create PR
        id: create_pr
        uses: peter-evans/create-pull-request@v5
        with:
          commit-message: Update cassettes
          signoff: false
          branch: cassette-diff-${{ github.event.pull_request.number }}
          delete-branch: false
          title: "Update cassettes"
          body: "This PR updates the cassettes."
          draft: false

      - name: Check PR
        run: |
          echo "Pull Request Number - ${{ steps.create_pr.outputs.pull-request-number }}"
          echo "Pull Request URL - ${{ steps.create_pr.outputs.pull-request-url }}"

      - name: Comment PR URL in the current PR
        uses: thollander/actions-comment-pull-request@v2
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          message: |
            New pull request created for cassettes: [HERE](${{ steps.create_pr.outputs.pull-request-url }}). Please merge it asap.
.github/workflows/ci.yml (vendored, 34 changes)

@@ -3,12 +3,12 @@ name: Python CI
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  pull_request_target:
    branches: [ master, stable ]

concurrency:
  group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
  cancel-in-progress: ${{ github.event_name == 'pull_request_target' }}

jobs:
  lint:
@@ -19,6 +19,10 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.ref }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v2
@@ -53,11 +57,15 @@
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11"]
        python-version: ["3.10"]

    steps:
      - name: Check out repository
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.ref }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
@@ -71,7 +79,23 @@
      - name: Run unittest tests with coverage
        run: |
          pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
          pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
        env:
          CI: true
          PROXY: ${{ vars.PROXY }}
          AGENT_MODE: ${{ vars.AGENT_MODE }}
          AGENT_TYPE: ${{ vars.AGENT_TYPE }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3

      - name: Stage new files and commit
        run: |
          git add tests
          git diff --cached --quiet && echo "No changes to commit" && exit 0
          git config user.email "github-actions@github.com"
          git config user.name "GitHub Actions"
          git commit -m "Add new cassettes"
          git checkout -b cassette-diff-${{ github.event.pull_request.number }}
          git remote add target https://${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.event.pull_request.base.repo.full_name }}
          git push -f target cassette-diff-${{ github.event.pull_request.number }}
.github/workflows/docker-ci.yml (vendored, 4 changes)

@@ -4,7 +4,7 @@ on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
    branches: [ master, stable ]

concurrency:
  group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
@@ -101,7 +101,7 @@ jobs:
      set +e
      test_output=$(
        docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
          pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
          pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
      )
      test_failure=$?
BULLETIN.md (27 changes)

@@ -1,9 +1,24 @@
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag
# Website and Documentation Site 📰📖
Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
The documentation also has a place here, at *https://docs.agpt.co*

# INCLUDED COMMAND 'send_tweet' IS DEPRICATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE
Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins
# 🚀 v0.3.0 Release 🚀
Over a week and 275 pull requests have passed since v0.2.2, and we are happy to announce
the release of v0.3.0! *From now on, we will be focusing on major improvements* rather
than bugfixes, as we feel stability has reached a reasonable level. Most remaining
issues relate to limitations in prompt generation and the memory system, which will be
the focus of our efforts for the next release.

## Changes to Docker configuration
The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.
Highlights and notable changes in this release:

## Plugin support 🔌
Auto-GPT now has support for plugins! With plugins, you can extend Auto-GPT's abilities,
adding support for third-party services and more.
See https://github.com/Significant-Gravitas/Auto-GPT-Plugins for instructions and available plugins.

## Changes to Docker configuration 🐋
The workdir has been changed from */home/appuser* to */app*.
Be sure to update any volume mounts accordingly!

# ⚠️ Command `send_tweet` is DEPRECATED, and will be removed in v0.4.0 ⚠️
Twitter functionality (and more) is now covered by plugins, see [Plugin support 🔌]
CONTRIBUTING.md (130 changes)

@@ -1,129 +1 @@
# Contributing to Auto-GPT

First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.

This document provides guidelines and best practices to help you contribute effectively.

## Code of Conduct

By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project.

[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md

## 📢 A Quick Word
Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT.

However, you absolutely can still add these commands to Auto-GPT in the form of plugins.
Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).

## Getting Started

1. Fork the repository and clone your fork.
2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
3. Make your changes in the new branch.
4. Test your changes thoroughly.
5. Commit and push your changes to your fork.
6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.

## How to Contribute

### Reporting Bugs

If you find a bug in the project, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A description of the problem, including steps to reproduce the issue.
- Any relevant logs, screenshots, or other supporting information.

### Suggesting Enhancements

If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
- Any relevant examples, mockups, or supporting information.

### Submitting Pull Requests

When submitting a pull request, please ensure that your changes meet the following criteria:

- Your pull request should be atomic and focus on a single change.
- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status)
- You should have thoroughly tested your changes with multiple different prompts.
- You should have considered potential risks and mitigations for your changes.
- You should have documented your changes clearly and comprehensively.
- You should not include any unrelated or "extra" small tweaks or changes.

## Style Guidelines

### Code Formatting

We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request.

To format your code, run the following commands in the project's root directory:

```bash
python -m black .
python -m isort .
```

Or if you have these tools installed globally:
```bash
black .
isort .
```

### Pre-Commit Hooks

We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:

Install the pre-commit package using pip:
```bash
pip install pre-commit
```

Run the following command in the project's root directory to install the pre-commit hooks:
```bash
pre-commit install
```

Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.

If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.

Happy coding, and once again, thank you for your contributions!

Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts

## Testing your changes

If you add or change code, make sure the updated code is covered by tests.
To increase coverage if necessary, [write tests using pytest].

For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/).

[write tests using pytest]: https://realpython.com/pytest-python-testing/

### API-dependent tests

To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known
requests and matching responses in so-called *cassettes*, allowing us to run the tests
in CI without needing actual API access.

When changes cause a test prompt to be generated differently, it will likely miss the
cache and make a request to the API, updating the cassette with the new request+response.
*Be sure to include the updated cassette in your PR!*

When you run Pytest locally:

- If no prompt change: you will not consume API tokens because there are no new OpenAI calls required.
- If the prompt changes in a way that the cassettes are not reusable:
  - If no API key, the test fails. It requires a new cassette. So, add an API key to .env.
  - If the API key is present, the tests will make a real call to OpenAI.
    - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR.
    - If the test is unsuccessful:
      - Either: Your change made Auto-GPT less capable, in that case, you have to change your code.
      - Or: The test might be poorly written. In that case, you can make suggestions to change the test.

In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break.
This document now lives at https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
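For context on the cassette mechanism described above, here is a minimal sketch of VCRpy with pytest. The decorator records the first real HTTP exchange into a YAML cassette and replays it on later runs; the cassette directory, endpoint, and test body are illustrative, not taken from Auto-GPT's test suite.

```python
import requests
import vcr

my_vcr = vcr.VCR(
    cassette_library_dir="tests/cassettes",  # illustrative path
    record_mode="once",                      # record if no cassette exists, else replay
    filter_headers=["authorization"],        # keep API keys out of committed cassettes
)


@my_vcr.use_cassette()
def test_models_endpoint():
    # First run with an API key: performs the real call and writes a cassette.
    # Subsequent runs (including CI): replayed from the cassette, no tokens spent.
    response = requests.get("https://api.openai.com/v1/models")
    assert response.status_code == 200
```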
Dockerfile

@@ -22,7 +22,7 @@ ENV PATH="$PATH:/root/.local/bin"
COPY requirements.txt .

# Set the entrypoint
ENTRYPOINT ["python", "-m", "autogpt"]
ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]

# dev build -> include everything
FROM autogpt-base as autogpt-dev
@@ -36,5 +36,7 @@ RUN sed -i '/Items below this point will not be included in the Docker Image/,$d
    pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins

FROM autogpt-${BUILD_TYPE} AS auto-gpt
README.md (18 changes)

@@ -1,4 +1,5 @@
# Auto-GPT: An Autonomous GPT-4 Experiment
[](https://agpt.co)
[](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
[](https://discord.gg/autogpt)
[](https://github.com/Significant-Gravitas/Auto-GPT/stargazers)
@@ -91,6 +92,7 @@ Your support is greatly appreciated. Development of this free, open-source proje

## Quickstart

0. Check out the [wiki](https://github.com/Significant-Gravitas/Auto-GPT/wiki)
1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys)
2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest)
3. Follow the [installation instructions][docs/setup]
@@ -99,21 +101,21 @@ Your support is greatly appreciated. Development of this free, open-source proje

Please see the [documentation][docs] for full setup instructions and configuration options.

[docs]: https://significant-gravitas.github.io/Auto-GPT/
[docs]: https://docs.agpt.co/

## 📖 Documentation
* [⚙️ Setup][docs/setup]
* [💻 Usage][docs/usage]
* [🔌 Plugins][docs/plugins]
* Configuration
  * [🔍 Web Search](https://significant-gravitas.github.io/Auto-GPT/configuration/search/)
  * [🧠 Memory](https://significant-gravitas.github.io/Auto-GPT/configuration/memory/)
  * [🗣️ Voice (TTS)](https://significant-gravitas.github.io/Auto-GPT/configuration/voice/)
  * [🖼️ Image Generation](https://significant-gravitas.github.io/Auto-GPT/configuration/imagegen/)
  * [🔍 Web Search](https://docs.agpt.co/configuration/search/)
  * [🧠 Memory](https://docs.agpt.co/configuration/memory/)
  * [🗣️ Voice (TTS)](https://docs.agpt.co/configuration/voice/)
  * [🖼️ Image Generation](https://docs.agpt.co/configuration/imagegen/)

[docs/setup]: https://significant-gravitas.github.io/Auto-GPT/setup/
[docs/usage]: https://significant-gravitas.github.io/Auto-GPT/usage/
[docs/plugins]: https://significant-gravitas.github.io/Auto-GPT/plugins/
[docs/setup]: https://docs.agpt.co/setup/
[docs/usage]: https://docs.agpt.co/usage/
[docs/plugins]: https://docs.agpt.co/plugins/

## ⚠️ Limitations
autogpt/agent/agent.py

@@ -1,3 +1,5 @@
from datetime import datetime

from colorama import Fore, Style

from autogpt.app import execute_command, get_command
@@ -5,10 +7,17 @@ from autogpt.config import Config
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
from autogpt.llm.token_counter import count_string_tokens
from autogpt.log_cycle.log_cycle import (
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input, send_chat_message_to_user
from autogpt.utils import clean_input
from autogpt.workspace import Workspace

@@ -57,7 +66,7 @@ class Agent:
        self.ai_name = ai_name
        self.memory = memory
        self.summary_memory = (
            "I was created."  # Initial memory necessary to avoid hilucination
            "I was created."  # Initial memory necessary to avoid hallucination
        )
        self.last_memory_index = 0
        self.full_message_history = full_message_history
@@ -67,31 +76,38 @@
        self.system_prompt = system_prompt
        self.triggering_prompt = triggering_prompt
        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)
        self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.cycle_count = 0
        self.log_cycle_handler = LogCycleHandler()

    def start_interaction_loop(self):
        # Interaction Loop
        cfg = Config()
        loop_count = 0
        self.cycle_count = 0
        command_name = None
        arguments = None
        user_input = ""

        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            self.cycle_count += 1
            self.log_cycle_handler.log_count_within_cycle = 0
            self.log_cycle_handler.log_cycle(
                self.config.ai_name,
                self.created_at,
                self.cycle_count,
                self.full_message_history,
                FULL_MESSAGE_HISTORY_FILE_NAME,
            )
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
                and self.cycle_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                send_chat_message_to_user(
                    f"Continuous Limit Reached: \n {cfg.continuous_limit}"
                )
                break
            send_chat_message_to_user("Thinking... \n")
            # Send message to AI, get response
            with Spinner("Thinking... "):
                assistant_reply = chat_with_ai(
@@ -107,7 +123,7 @@ class Agent:
            for plugin in cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                assistant_reply_json = plugin.post_planning(self, assistant_reply_json)
                assistant_reply_json = plugin.post_planning(assistant_reply_json)

            # Print Assistant thoughts
            if assistant_reply_json != {}:
@@ -121,35 +137,36 @@
                    if cfg.speak_mode:
                        say_text(f"I want to execute {command_name}")

                    send_chat_message_to_user("Thinking... \n")
                    arguments = self._resolve_pathlike_command_args(arguments)

                except Exception as e:
                    logger.error("Error: \n", str(e))
            self.log_cycle_handler.log_cycle(
                self.config.ai_name,
                self.created_at,
                self.cycle_count,
                assistant_reply_json,
                NEXT_ACTION_FILE_NAME,
            )

            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
            )

            if not cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                send_chat_message_to_user(
                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
                    f"ARGUMENTS = {arguments}"
                )
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                print(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands"
                logger.info(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
                    "'n' to exit program, or enter feedback for "
                    f"{self.ai_name}...",
                    flush=True,
                    f"{self.ai_name}..."
                )
                while True:
                    console_input = ""
                    if cfg.chat_messages_enabled:
                        console_input = clean_input("Waiting for your response...")
                    else:
@@ -165,23 +182,20 @@ class Agent:
                            Fore.GREEN,
                            "",
                        )

                        self_feedback_resp = self.get_self_feedback(self.full_message_history,
                            assistant_reply_json, cfg.fast_llm_model
                        thoughts = assistant_reply_json.get("thoughts", {})
                        self_feedback_resp = self.get_self_feedback(
                            thoughts, cfg.fast_llm_model
                        )
                        logger.typewriter_log(
                            f"SELF FEEDBACK: {self_feedback_resp}",
                            Fore.YELLOW,
                            "",
                        )
                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
                            user_input = "GENERATE NEXT COMMAND JSON"
                        else:
                            user_input = self_feedback_resp
                        command_name = "human_feedback"
                        user_input = self_feedback_resp
                        command_name = "self_feedback"
                        break
                    elif console_input.lower().strip() == "":
                        print("Invalid input format.")
                        logger.warn("Invalid input format.")
                        continue
                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        try:
@@ -190,8 +204,8 @@
                            )
                            user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            print(
                                f"Invalid input format. Please enter '{cfg.authorise_key} -N' where N is"
                            logger.warn(
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
                            )
                            continue
@@ -202,6 +216,13 @@
                    else:
                        user_input = console_input
                        command_name = "human_feedback"
                        self.log_cycle_handler.log_cycle(
                            self.config.ai_name,
                            self.created_at,
                            self.cycle_count,
                            user_input,
                            USER_INPUT_FILE_NAME,
                        )
                        break

                if user_input == "GENERATE NEXT COMMAND JSON":
@@ -211,21 +232,12 @@
                        "",
                    )
                elif user_input == "EXIT":
                    send_chat_message_to_user("Exiting...")
                    print("Exiting...", flush=True)
                    logger.info("Exiting...")
                    break
            else:
                # Print command
                send_chat_message_to_user(
                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
                    f"ARGUMENTS = {arguments}"
                )

                # Print authorized commands left value
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                    f"{Fore.CYAN}AUTHORISED COMMANDS LEFT: {Style.RESET_ALL}{self.next_action_count}"
                )

            # Execute command
@@ -235,6 +247,8 @@
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {user_input}"
            elif command_name == "self_feedback":
                result = f"Self feedback: {user_input}"
            else:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_pre_command():
@@ -250,6 +264,16 @@
                )
                result = f"Command {command_name} returned: " f"{command_result}"

                result_tlength = count_string_tokens(
                    str(command_result), cfg.fast_llm_model
                )
                memory_tlength = count_string_tokens(
                    str(self.summary_memory), cfg.fast_llm_model
                )
                if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
                    result = f"Failure: command {command_name} returned too much output. \
                        Do not execute this command again with the same arguments."

                for plugin in cfg.plugins:
                    if not plugin.can_handle_post_command():
                        continue
@@ -281,7 +305,7 @@
        )
        return command_args

    def get_self_feedback(self, full_message_history, latest_response_json, llm_model: str) -> str:
    def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
        """Generates a feedback response based on the provided thoughts dictionary.
        This method takes in a dictionary of thoughts containing keys such as 'reasoning',
        'plan', 'thoughts', and 'criticism'. It combines these elements into a single
@@ -294,59 +318,13 @@
            str: A feedback response generated using the provided thoughts dictionary.
        """
        ai_role = self.config.ai_role
        thoughts = latest_response_json.get("thoughts", {})
        command = latest_response_json.get("command", {})

        from autogpt.llm.token_counter import count_message_tokens
        import json

        # Get ~2000 tokens from the full message history
        # !!WARNING: THIS IMPLEMENTATION IS BAD - CAUSES BUG SIMILAR TO THIS: https://github.com/Significant-Gravitas/Auto-GPT/pull/3619
        trimmed_message_history = []
        for i in range(len(full_message_history) - 1, -1, -1):
            message = full_message_history[i]
            # Skip all messages from the user
            if message["role"] == "user":
                continue
            # If the message is from the assistant, remove the "thoughts" dictionary from the content
            elif message["role"] == "assistant":
                try:
                    content_dict = json.loads(message["content"])
                    content_dict = content_dict.copy()
                    if "thoughts" in content_dict:
                        del content_dict["thoughts"]
                    message["content"] = json.dumps(content_dict)
                except:
                    pass
            trimmed_message_history.append(message)

        feedback_prompt = f"""Below is a message from an AI agent with the role: '{ai_role}'.
Please review the provided Recent History, Agent's Plan, The Agent's proposed action and their Reasoning.

If the agent's command makes sense and the agent is on the right track, respond with the letter 'Y' followed by a space.
If the provided information is not suitable for achieving the role's objectives or a red flag is raised, please clearly and concisely tell the agent about the issue and suggesting an alternative action.
"""
        feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."
        reasoning = thoughts.get("reasoning", "")
        plan = thoughts.get("plan", "")
        # thought = thoughts.get("thoughts", "")
        # criticism = thoughts.get("criticism", "")
        # feedback_thoughts = thought + reasoning + plan + criticism
        thought = thoughts.get("thoughts", "")
        feedback_thoughts = thought + reasoning + plan
        return create_chat_completion(
            [
                {"role": "system", "content": f""""You are AgentReviewerGPT.\n\nRespond with Y if the agent passes your review.\n\nBe wary of the following red flags in the agent's behaviour:
- The agent is repeating itself.
- The agent is stuck in a loop.
- The agent is using '<text>' instead of the actual text.
- The agent is using the wrong command for the situation.
- The agent is executing a python file that does not exist (it should check if the file exists and read it's contents before executing it).

Notes:
+ Hardcoded paths are okay""" },
                {"role": "user", "content": f"{feedback_prompt}\n\nRecent History:\n{trimmed_message_history}\n\n\n\n\Agent's Plan:\n{plan}\n\nAgent's Proposed Action:\n{command}\n\nAgent's Reasoning:\n{reasoning}" }
            ],
            [{"role": "user", "content": feedback_prompt + feedback_thoughts}],
            llm_model,
        )
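To summarize the new control flow: pressing 's' now routes only the assistant's `thoughts` dictionary (not the full message history) into `get_self_feedback`, and the result is replayed into the loop as a `self_feedback` pseudo-command. A condensed sketch of that branch, assuming an `agent` object with the attributes used above; this is a simplification of `Agent.start_interaction_loop`, not a drop-in replacement:

```python
def handle_console_input(agent, console_input: str, assistant_reply_json: dict, cfg):
    """Return (user_input, command_name) for one console interaction."""
    text = console_input.lower().strip()
    if text == cfg.authorise_key:          # 'y' — authorize the proposed command
        return "GENERATE NEXT COMMAND JSON", None
    if text == "s":                        # 's' — ask the LLM to critique itself
        thoughts = assistant_reply_json.get("thoughts", {})
        feedback = agent.get_self_feedback(thoughts, cfg.fast_llm_model)
        # The next prompt will contain "Self feedback: ..." instead of human feedback.
        return feedback, "self_feedback"
    return console_input, "human_feedback"  # anything else is human feedback
```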
autogpt/app.py

@@ -6,6 +6,7 @@ from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
@@ -119,8 +120,6 @@ def execute_command(
        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again
        elif command_name == "task_complete":
            shutdown()
        else:
            for command in prompt.commands:
                if (
@@ -170,12 +169,6 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
    return scrape_links(url)


def shutdown() -> NoReturn:
    """Shut down the program"""
    print("Shutting down...")
    quit()


@command(
    "start_agent",
    "Start GPT Agent",
autogpt/commands/execute_code.py

@@ -8,6 +8,7 @@ from docker.errors import ImageNotFound

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

CFG = Config()

@@ -22,7 +23,7 @@ def execute_python_file(filename: str) -> str:
    Returns:
        str: The output of the file
    """
    print(f"Executing file '{filename}'")
    logger.info(f"Executing file '{filename}'")

    if not filename.endswith(".py"):
        return "Error: Invalid file type. Only .py files are allowed."
@@ -47,9 +48,11 @@ def execute_python_file(filename: str) -> str:
        image_name = "python:3-alpine"
        try:
            client.images.get(image_name)
            print(f"Image '{image_name}' found locally")
            logger.warn(f"Image '{image_name}' found locally")
        except ImageNotFound:
            print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
            logger.info(
                f"Image '{image_name}' not found locally, pulling from Docker Hub"
            )
            # Use the low-level API to stream the pull response
            low_level_client = docker.APIClient()
            for line in low_level_client.pull(image_name, stream=True, decode=True):
@@ -57,9 +60,9 @@ def execute_python_file(filename: str) -> str:
                status = line.get("status")
                progress = line.get("progress")
                if status and progress:
                    print(f"{status}: {progress}")
                    logger.info(f"{status}: {progress}")
                elif status:
                    print(status)
                    logger.info(status)
        container = client.containers.run(
            image_name,
            f"python {Path(filename).relative_to(CFG.workspace_path)}",
@@ -85,7 +88,7 @@ def execute_python_file(filename: str) -> str:
        return logs

    except docker.errors.DockerException as e:
        print(
        logger.warn(
            "Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
        )
        return f"Error: {str(e)}"
@@ -118,7 +121,9 @@ def execute_shell(command_line: str) -> str:
    if not current_dir.is_relative_to(CFG.workspace_path):
        os.chdir(CFG.workspace_path)

    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
    )

    result = subprocess.run(command_line, capture_output=True, shell=True)
    output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
@@ -154,7 +159,9 @@ def execute_shell_popen(command_line) -> str:
    if CFG.workspace_path not in current_dir:
        os.chdir(CFG.workspace_path)

    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
    )

    do_not_show_output = subprocess.DEVNULL
    process = subprocess.Popen(
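Note the two confinement styles visible in this file: `execute_shell` uses a path-aware `Path.is_relative_to` check, while `execute_shell_popen` tests substring containment, which can misfire on sibling directories. A small self-contained demonstration of the difference (the paths are made up for illustration):

```python
from pathlib import Path

workspace = Path("/app/auto_gpt_workspace")
current = Path("/app/auto_gpt_workspace_backup")  # a sibling, NOT inside workspace

# Robust: path-aware comparison (Python 3.9+)
print(current.is_relative_to(workspace))  # False — sibling correctly rejected

# Fragile: plain substring test on the string forms
print(str(workspace) in str(current))     # True — sibling wrongly accepted
```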
autogpt/commands/file_operations.py

@@ -1,46 +1,114 @@
"""File operations for AutoGPT"""
from __future__ import annotations

import hashlib
import os
import os.path
from typing import Generator
from typing import Dict, Generator, Literal, Tuple

import charset_normalizer
import requests
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size

CFG = Config()

Operation = Literal["write", "append", "delete"]


def check_duplicate_operation(operation: str, filename: str) -> bool:
    """Check if the operation has already been performed on the given file

    Args:
        operation (str): The operation to check for
        filename (str): The name of the file to check for
def text_checksum(text: str) -> str:
    """Get the hex checksum for the given text."""
    return hashlib.md5(text.encode("utf-8")).hexdigest()


def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str | None]]:
    """Parse the file operations log and return a tuple containing the log entries"""
    try:
        log = open(log_path, "r", encoding="utf-8")
    except FileNotFoundError:
        return

    for line in log:
        line = line.replace("File Operation Logger", "").strip()
        if not line:
            continue
        operation, tail = line.split(": ", maxsplit=1)
        operation = operation.strip()
        if operation in ("write", "append"):
            try:
                path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
            except ValueError:
                path, checksum = tail.strip(), None
            yield (operation, path, checksum)
        elif operation == "delete":
            yield (operation, tail.strip(), None)

    log.close()


def file_operations_state(log_path: str) -> Dict:
    """Iterates over the operations log and returns the expected state.

    Parses a log file at CFG.file_logger_path to construct a dictionary that maps
    each file path written or appended to its checksum. Deleted files are removed
    from the dictionary.

    Returns:
        bool: True if the operation has already been performed on the file
        A dictionary mapping file paths to their checksums.

    Raises:
        FileNotFoundError: If CFG.file_logger_path is not found.
        ValueError: If the log file content is not in the expected format.
    """
    log_content = read_file(CFG.file_logger_path)
    log_entry = f"{operation}: {filename}\n"
    return log_entry in log_content
    state = {}
    for operation, path, checksum in operations_from_log(log_path):
        if operation in ("write", "append"):
            state[path] = checksum
        elif operation == "delete":
            del state[path]
    return state


def log_operation(operation: str, filename: str) -> None:
def is_duplicate_operation(
    operation: Operation, filename: str, checksum: str | None = None
) -> bool:
    """Check if the operation has already been performed

    Args:
        operation: The operation to check for
        filename: The name of the file to check for
        checksum: The checksum of the contents to be written

    Returns:
        True if the operation has already been performed on the file
    """
    state = file_operations_state(CFG.file_logger_path)
    if operation == "delete" and filename not in state:
        return True
    if operation == "write" and state.get(filename) == checksum:
        return True
    return False


def log_operation(operation: str, filename: str, checksum: str | None = None) -> None:
    """Log the file operation to the file_logger.txt

    Args:
        operation (str): The operation to log
        filename (str): The name of the file the operation was performed on
        operation: The operation to log
        filename: The name of the file the operation was performed on
        checksum: The checksum of the contents to be written
    """
    log_entry = f"{operation}: {filename}\n"
    append_to_file(CFG.file_logger_path, log_entry, should_log=False)
    log_entry = f"{operation}: {filename}"
    if checksum is not None:
        log_entry += f" #{checksum}"
    logger.debug(f"Logging file operation: {log_entry}")
    append_to_file(CFG.file_logger_path, f"{log_entry}\n", should_log=False)


def split_file(
@@ -86,11 +154,12 @@ def read_file(filename: str) -> str:
        str: The contents of the file
    """
    try:
        with open(filename, "r", encoding="utf-8") as f:
            content = f.read()
        return content
    except Exception as e:
        return f"Error: {str(e)}"
        charset_match = charset_normalizer.from_path(filename).best()
        encoding = charset_match.encoding
        logger.debug(f"Read file '{filename}' with encoding '{encoding}'")
        return str(charset_match)
    except Exception as err:
        return f"Error: {err}"


def ingest_file(
@@ -106,25 +175,25 @@ def ingest_file(
    :param overlap: The number of overlapping characters between chunks, default is 200
    """
    try:
        print(f"Working with file {filename}")
        logger.info(f"Working with file {filename}")
        content = read_file(filename)
        content_length = len(content)
        print(f"File length: {content_length} characters")
        logger.info(f"File length: {content_length} characters")

        chunks = list(split_file(content, max_length=max_length, overlap=overlap))

        num_chunks = len(chunks)
        for i, chunk in enumerate(chunks):
            print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
            logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
            memory_to_add = (
                f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
            )

            memory.add(memory_to_add)

        print(f"Done ingesting {num_chunks} chunks from {filename}.")
    except Exception as e:
        print(f"Error while ingesting file '{filename}': {str(e)}")
        logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
    except Exception as err:
        logger.info(f"Error while ingesting file '{filename}': {err}")


@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
@@ -138,17 +207,18 @@ def write_to_file(filename: str, text: str) -> str:
    Returns:
        str: A message indicating success or failure
    """
    if check_duplicate_operation("write", filename):
    checksum = text_checksum(text)
    if is_duplicate_operation("write", filename, checksum):
        return "Error: File has already been updated."
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        with open(filename, "w", encoding="utf-8") as f:
            f.write(text)
        log_operation("write", filename)
        log_operation("write", filename, checksum)
        return "File written to successfully."
    except Exception as e:
        return f"Error: {str(e)}"
    except Exception as err:
        return f"Error: {err}"


@command(
@@ -168,15 +238,17 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        with open(filename, "a") as f:
        with open(filename, "a", encoding="utf-8") as f:
            f.write(text)

        if should_log:
            log_operation("append", filename)
            with open(filename, "r", encoding="utf-8") as f:
                checksum = text_checksum(f.read())
            log_operation("append", filename, checksum=checksum)

        return "Text appended successfully."
    except Exception as e:
        return f"Error: {str(e)}"
    except Exception as err:
        return f"Error: {err}"


@command("delete_file", "Delete file", '"filename": "<filename>"')
@@ -189,19 +261,19 @@ def delete_file(filename: str) -> str:
    Returns:
        str: A message indicating success or failure
    """
    if check_duplicate_operation("delete", filename):
    if is_duplicate_operation("delete", filename):
        return "Error: File has already been deleted."
    try:
        os.remove(filename)
        log_operation("delete", filename)
        return "File deleted successfully."
    except Exception as e:
        return f"Error: {str(e)}"
    except Exception as err:
        return f"Error: {err}"


@command("search_files", "Search Files", '"directory": "<directory>"')
def search_files(directory: str) -> list[str]:
    """Search for files in a directory
@command("list_files", "List Files in Directory", '"directory": "<directory>"')
def list_files(directory: str) -> list[str]:
    """lists files in a directory recursively

    Args:
        directory (str): The directory to search in
@@ -264,8 +336,8 @@ def download_file(url, filename):
            progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
            spinner.update_message(f"{message} {progress}")

            return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
    except requests.HTTPError as e:
        return f"Got an HTTP Error whilst trying to download file: {e}"
    except Exception as e:
        return "Error: " + str(e)
        return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
    except requests.HTTPError as err:
        return f"Got an HTTP Error whilst trying to download file: {err}"
    except Exception as err:
        return f"Error: {err}"
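Putting the new pieces together: the operations log now stores an MD5 checksum per write/append, and `file_operations_state` replays the log into a path→checksum map so duplicate operations can be detected by content, not just by name. A self-contained sketch of the log format and the state it implies (the paths and contents are made up for illustration):

```python
import hashlib


def md5(text: str) -> str:
    return hashlib.md5(text.encode("utf-8")).hexdigest()


# A hypothetical file_logger.txt under the new format:
log_lines = [
    f"write: notes.txt #{md5('hello')}",
    f"append: notes.txt #{md5('hello world')}",  # checksum of the whole file after append
    f"write: draft.txt #{md5('v1')}",
    "delete: draft.txt",
]

# Replaying the log yields the expected workspace state:
state: dict[str, str | None] = {}
for line in log_lines:
    operation, tail = line.split(": ", maxsplit=1)
    if operation in ("write", "append"):
        path, _, checksum = tail.partition(" #")
        state[path] = checksum or None
    elif operation == "delete":
        state.pop(tail, None)

print(state)  # {'notes.txt': md5('hello world')} — draft.txt was deleted
```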
autogpt/commands/git_operations.py

@@ -11,25 +11,25 @@ CFG = Config()
@command(
    "clone_repository",
    "Clone Repository",
    '"repository_url": "<repository_url>", "clone_path": "<clone_path>"',
    '"url": "<repository_url>", "clone_path": "<clone_path>"',
    CFG.github_username and CFG.github_api_key,
    "Configure github_username and github_api_key.",
)
@validate_url
def clone_repository(repository_url: str, clone_path: str) -> str:
def clone_repository(url: str, clone_path: str) -> str:
    """Clone a GitHub repository locally.

    Args:
        repository_url (str): The URL of the repository to clone.
        url (str): The URL of the repository to clone.
        clone_path (str): The path to clone the repository to.

    Returns:
        str: The result of the clone operation.
    """
    split_url = repository_url.split("//")
    split_url = url.split("//")
    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
    try:
        Repo.clone_from(auth_repo_url, clone_path)
        return f"""Cloned {repository_url} to {clone_path}"""
        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
        return f"""Cloned {url} to {clone_path}"""
    except Exception as e:
        return f"Error: {str(e)}"
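The clone helper splices credentials into the URL by splitting on `//` and rejoining with a `user:token@` infix. A quick worked example of what `auth_repo_url` ends up as (placeholder credentials, obviously):

```python
url = "https://github.com/example/repo.git"
github_username = "octocat"   # placeholder
github_api_key = "ghp_xxx"    # placeholder token

split_url = url.split("//")   # ['https:', 'github.com/example/repo.git']
auth_repo_url = f"//{github_username}:{github_api_key}@".join(split_url)
print(auth_repo_url)
# https://octocat:ghp_xxx@github.com/example/repo.git
```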
autogpt/commands/image_gen.py

@@ -9,6 +9,7 @@ from PIL import Image

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

CFG = Config()

@@ -69,7 +70,7 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
    )

    image = Image.open(io.BytesIO(response.content))
    print(f"Image Generated for prompt:{prompt}")
    logger.info(f"Image Generated for prompt:{prompt}")

    image.save(filename)

@@ -91,7 +92,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
    # Check for supported image sizes
    if size not in [256, 512, 1024]:
        closest = min([256, 512, 1024], key=lambda x: abs(x - size))
        print(
        logger.info(
            f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
        )
        size = closest
@@ -104,7 +105,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
        api_key=CFG.openai_api_key,
    )

    print(f"Image Generated for prompt:{prompt}")
    logger.info(f"Image Generated for prompt:{prompt}")

    image_data = b64decode(response["data"][0]["b64_json"])

@@ -153,7 +154,7 @@ def generate_image_with_sd_webui(
        },
    )

    print(f"Image Generated for prompt:{prompt}")
    logger.info(f"Image Generated for prompt:{prompt}")

    # Save the image to disk
    response = response.json()
26  autogpt/commands/task_statuses.py  Normal file
@@ -0,0 +1,26 @@
+"""Task Statuses module."""
+from __future__ import annotations
+
+from typing import NoReturn
+
+from autogpt.commands.command import command
+from autogpt.logs import logger
+
+
+@command(
+    "task_complete",
+    "Task Complete (Shutdown)",
+    '"reason": "<reason>"',
+)
+def task_complete(reason: str) -> NoReturn:
+    """
+    A function that takes in a string and exits the program
+
+    Parameters:
+        reason (str): The reason for shutting down.
+    Returns:
+        A result string from create chat completion. A list of suggestions to
+        improve the code.
+    """
+    logger.info(title="Shutting down...\n", message=reason)
+    quit()
@@ -1,10 +1,12 @@
 """Web scraping commands using Playwright"""
 from __future__ import annotations

+from autogpt.logs import logger
+
 try:
     from playwright.sync_api import sync_playwright
 except ImportError:
-    print(
+    logger.info(
         "Playwright not installed. Please install it with 'pip install playwright' to use."
     )
 from bs4 import BeautifulSoup
@@ -10,6 +10,7 @@ from selenium import webdriver
 from selenium.common.exceptions import WebDriverException
 from selenium.webdriver.chrome.options import Options as ChromeOptions
 from selenium.webdriver.common.by import By
+from selenium.webdriver.edge.options import Options as EdgeOptions
 from selenium.webdriver.firefox.options import Options as FirefoxOptions
 from selenium.webdriver.remote.webdriver import WebDriver
 from selenium.webdriver.safari.options import Options as SafariOptions
@@ -17,6 +18,7 @@ from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.support.wait import WebDriverWait
 from webdriver_manager.chrome import ChromeDriverManager
 from webdriver_manager.firefox import GeckoDriverManager
+from webdriver_manager.microsoft import EdgeChromiumDriverManager

 import autogpt.processing.text as summary
 from autogpt.commands.command import command
@@ -34,7 +36,7 @@ CFG = Config()
     '"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
 )
 @validate_url
-def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
+def browse_website(url: str, question: str) -> str:
     """Browse a website and return the answer and links to the user

     Args:
@@ -50,7 +52,7 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
         # These errors are often quite long and include lots of context.
         # Just grab the first line.
         msg = e.msg.split("\n")[0]
-        return f"Error: {msg}", None
+        return f"Error: {msg}"

     add_header(driver)
     summary_text = summary.summarize_text(url, text, question, driver)
@@ -60,7 +62,7 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
     if len(links) > 5:
         links = links[:5]
     close_browser(driver)
-    return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver
+    return f"Answer gathered from website: {summary_text} \n \n Links: {links}"


 def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
@@ -78,6 +80,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
         "chrome": ChromeOptions,
         "safari": SafariOptions,
         "firefox": FirefoxOptions,
+        "edge": EdgeOptions,
     }

     options = options_available[CFG.selenium_web_browser]()
@@ -96,6 +99,10 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
         # Requires a bit more setup on the users end
         # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
         driver = webdriver.Safari(options=options)
+    elif CFG.selenium_web_browser == "edge":
+        driver = webdriver.Edge(
+            executable_path=EdgeChromiumDriverManager().install(), options=options
+        )
     else:
         if platform == "linux" or platform == "linux2":
             options.add_argument("--disable-dev-shm-usage")
@@ -175,4 +182,9 @@ def add_header(driver: WebDriver) -> None:
     Returns:
         None
     """
-    driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read())
+    try:
+        with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file:
+            overlay_script = overlay_file.read()
+        driver.execute_script(overlay_script)
+    except Exception as e:
+        print(f"Error executing overlay.js: {e}")
@@ -7,7 +7,7 @@ from __future__ import annotations
 import os
 import platform
 from pathlib import Path
-from typing import Optional, Type
+from typing import Any, Optional, Type

 import distro
 import yaml
@@ -79,7 +79,12 @@ class AIConfig:

         ai_name = config_params.get("ai_name", "")
         ai_role = config_params.get("ai_role", "")
-        ai_goals = config_params.get("ai_goals", [])
+        ai_goals = [
+            str(goal).strip("{}").replace("'", "").replace('"', "")
+            if isinstance(goal, dict)
+            else str(goal)
+            for goal in config_params.get("ai_goals", [])
+        ]
         api_budget = config_params.get("api_budget", 0.0)
         # type: Type[AIConfig]
         return AIConfig(ai_name, ai_role, ai_goals, api_budget)
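The new comprehension sanitises goals loaded from ai_settings.yaml: a goal that YAML parsed as a dict is flattened to a plain string, and everything else is coerced with str(). Run in isolation on sample data (the sample values are illustrative, not from the repo):

    config_params = {"ai_goals": [{"Increase net worth": None}, "Grow Twitter Account"]}

    ai_goals = [
        str(goal).strip("{}").replace("'", "").replace('"', "")
        if isinstance(goal, dict)
        else str(goal)
        for goal in config_params.get("ai_goals", [])
    ]
    print(ai_goals)  # ['Increase net worth: None', 'Grow Twitter Account']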
@@ -30,11 +30,21 @@ class Config(metaclass=Singleton):

         self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
         self.exit_key = os.getenv("EXIT_KEY", "n")
+
+        disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES")
+        if disabled_command_categories:
+            self.disabled_command_categories = disabled_command_categories.split(",")
+        else:
+            self.disabled_command_categories = []
+
         self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
         self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
         self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
         self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
+        self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002")
+        self.embedding_tokenizer = os.getenv("EMBEDDING_TOKENIZER", "cl100k_base")
+        self.embedding_token_limit = int(os.getenv("EMBEDDING_TOKEN_LIMIT", 8191))
         self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
         self.browse_spacy_language_model = os.getenv(
             "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
@@ -139,7 +149,12 @@ class Config(metaclass=Singleton):
             self.plugins_allowlist = plugins_allowlist.split(",")
         else:
             self.plugins_allowlist = []
-        self.plugins_denylist = []
+
+        plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
+        if plugins_denylist:
+            self.plugins_denylist = plugins_denylist.split(",")
+        else:
+            self.plugins_denylist = []

     def get_azure_deployment_id_for_model(self, model: str) -> str:
         """
@@ -216,6 +231,18 @@ class Config(metaclass=Singleton):
         """Set the smart token limit value."""
         self.smart_token_limit = value

+    def set_embedding_model(self, value: str) -> None:
+        """Set the model to use for creating embeddings."""
+        self.embedding_model = value
+
+    def set_embedding_tokenizer(self, value: str) -> None:
+        """Set the tokenizer to use when creating embeddings."""
+        self.embedding_tokenizer = value
+
+    def set_embedding_token_limit(self, value: int) -> None:
+        """Set the token limit for creating embeddings."""
+        self.embedding_token_limit = value
+
     def set_browse_chunk_max_length(self, value: int) -> None:
         """Set the browse_website command chunk max length value."""
         self.browse_chunk_max_length = value
@@ -9,6 +9,7 @@ from typing import Optional

 from autogpt.config import Config
 from autogpt.json_utils.utilities import extract_char_position
+from autogpt.logs import logger

 CFG = Config()

@@ -33,8 +34,7 @@ def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
         json.loads(json_to_load)
         return json_to_load
     except json.JSONDecodeError as e:
-        if CFG.debug_mode:
-            print("json loads error - fix invalid escape", e)
+        logger.debug("json loads error - fix invalid escape", e)
         error_message = str(e)
     return json_to_load

@@ -98,13 +98,11 @@ def correct_json(json_to_load: str) -> str:
     """

     try:
-        if CFG.debug_mode:
-            print("json", json_to_load)
+        logger.debug("json", json_to_load)
         json.loads(json_to_load)
         return json_to_load
     except json.JSONDecodeError as e:
-        if CFG.debug_mode:
-            print("json loads error", e)
+        logger.debug("json loads error", e)
         error_message = str(e)
         if error_message.startswith("Invalid \\escape"):
             json_to_load = fix_invalid_escape(json_to_load, error_message)
@@ -116,8 +114,7 @@ def correct_json(json_to_load: str) -> str:
             json.loads(json_to_load)
             return json_to_load
         except json.JSONDecodeError as e:
-            if CFG.debug_mode:
-                print("json loads error - add quotes", e)
+            logger.debug("json loads error - add quotes", e)
             error_message = str(e)
         if balanced_str := balance_braces(json_to_load):
             return balanced_str
@@ -1,5 +1,6 @@
 """Utilities for the json_fixes package."""
 import json
+import os.path
 import re

 from jsonschema import Draft7Validator
@@ -35,7 +36,8 @@ def validate_json(json_object: object, schema_name: str) -> dict | None:
     :param schema_name: str
     :type json_object: object
     """
-    with open(f"autogpt/json_utils/{schema_name}.json", "r") as f:
+    scheme_file = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
+    with open(scheme_file, "r") as f:
         schema = json.load(f)
     validator = Draft7Validator(schema)

@@ -49,9 +51,8 @@ def validate_json(json_object: object, schema_name: str) -> dict | None:

         for error in errors:
             logger.error(f"Error: {error.message}")
         return None
-    if CFG.debug_mode:
-        print("The JSON object is valid.")
+    else:
+        logger.debug("The JSON object is valid.")

     return json_object
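The path change matters because the old code resolved the schema relative to the process working directory, so validation broke whenever Auto-GPT was launched from anywhere but the repo root. Anchoring on the module's own location makes it independent of the caller. A stand-alone sketch of the same idiom, with the schema name assumed for illustration:

    import os.path

    def schema_path(schema_name: str) -> str:
        # Resolve next to this module, not the current working directory.
        return os.path.join(os.path.dirname(__file__), f"{schema_name}.json")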
@@ -11,6 +11,7 @@ from autogpt.llm.base import (
 from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context
 from autogpt.llm.llm_utils import (
     call_ai_function,
+    chunked_tokens,
     create_chat_completion,
     get_ada_embedding,
 )
@@ -32,6 +33,7 @@ __all__ = [
     "call_ai_function",
     "create_chat_completion",
     "get_ada_embedding",
+    "chunked_tokens",
     "COSTS",
     "count_message_tokens",
     "count_string_tokens",
@@ -59,10 +59,11 @@ class ApiManager(metaclass=Singleton):
             max_tokens=max_tokens,
             api_key=cfg.openai_api_key,
         )
-        logger.debug(f"Response: {response}")
-        prompt_tokens = response.usage.prompt_tokens
-        completion_tokens = response.usage.completion_tokens
-        self.update_cost(prompt_tokens, completion_tokens, model)
+        if not hasattr(response, "error"):
+            logger.debug(f"Response: {response}")
+            prompt_tokens = response.usage.prompt_tokens
+            completion_tokens = response.usage.completion_tokens
+            self.update_cost(prompt_tokens, completion_tokens, model)
         return response

     def update_cost(self, prompt_tokens, completion_tokens, model):
@@ -8,14 +8,8 @@ from autogpt.llm.api_manager import ApiManager
 from autogpt.llm.base import Message
 from autogpt.llm.llm_utils import create_chat_completion
 from autogpt.llm.token_counter import count_message_tokens
+from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME
 from autogpt.logs import logger
-from autogpt.memory_management.store_memory import (
-    save_memory_trimmed_from_context_window,
-)
-from autogpt.memory_management.summary_memory import (
-    get_newly_trimmed_messages,
-    update_running_summary,
-)

 cfg = Config()

@@ -152,6 +146,10 @@ def chat_with_ai(

                 # Move to the next most recent message in the full message history
                 next_message_to_add_index -= 1
+            from autogpt.memory_management.summary_memory import (
+                get_newly_trimmed_messages,
+                update_running_summary,
+            )

             # Insert Memories
             if len(full_message_history) > 0:
@@ -163,7 +161,9 @@ def chat_with_ai(
                     current_context=current_context,
                     last_memory_index=agent.last_memory_index,
                 )
+
                 agent.summary_memory = update_running_summary(
+                    agent,
                     current_memory=agent.summary_memory,
                     new_events=newly_trimmed_messages,
                 )
@@ -200,7 +200,7 @@ def chat_with_ai(
                 if not plugin.can_handle_on_planning():
                     continue
                 plugin_response = plugin.on_planning(
-                    agent.prompt_generator, current_context
+                    agent.config.prompt_generator, current_context
                 )
                 if not plugin_response or plugin_response == "":
                     continue
@@ -208,9 +208,8 @@ def chat_with_ai(
                     [create_chat_message("system", plugin_response)], model
                 )
                 if current_tokens_used + tokens_to_add > send_token_limit:
-                    if cfg.debug_mode:
-                        print("Plugin response too long, skipping:", plugin_response)
-                        print("Plugins remaining at stop:", plugin_count - i)
+                    logger.debug("Plugin response too long, skipping:", plugin_response)
+                    logger.debug("Plugins remaining at stop:", plugin_count - i)
                     break
                 current_context.append(create_chat_message("system", plugin_response))

@@ -232,6 +231,13 @@ def chat_with_ai(
                 logger.debug(f"{message['role'].capitalize()}: {message['content']}")
                 logger.debug("")
             logger.debug("----------- END OF CONTEXT ----------------")
+            agent.log_cycle_handler.log_cycle(
+                agent.config.ai_name,
+                agent.created_at,
+                agent.cycle_count,
+                current_context,
+                CURRENT_CONTEXT_FILE_NAME,
+            )

             # TODO: use a model defined elsewhere, so that model can contain
             # temperature and other settings we care about
@@ -250,5 +256,5 @@ def chat_with_ai(
             return assistant_reply
         except RateLimitError:
             # TODO: When we switch to langchain, this is built in
-            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
+            logger.warn("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)
@@ -2,9 +2,12 @@ from __future__ import annotations

 import functools
 import time
+from itertools import islice
 from typing import List, Optional

+import numpy as np
 import openai
+import tiktoken
 from colorama import Fore, Style
 from openai.error import APIError, RateLimitError, Timeout

@@ -30,7 +33,7 @@ def retry_openai_api(
     api_key_error_msg = (
         f"Please double check that you have setup a "
         f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
-        f"read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}"
+        f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
     )
     backoff_msg = (
         f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
@@ -128,10 +131,9 @@ def create_chat_completion(

     num_retries = 10
     warned_user = False
-    if cfg.debug_mode:
-        print(
-            f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
-        )
+    logger.debug(
+        f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
+    )
     for plugin in cfg.plugins:
         if plugin.can_handle_chat_completion(
             messages=messages,
@@ -169,14 +171,13 @@ def create_chat_completion(
             )
             break
         except RateLimitError:
-            if cfg.debug_mode:
-                print(
-                    f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
-                )
+            logger.debug(
+                f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
+            )
             if not warned_user:
                 logger.double_check(
                     f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
-                    + f"You can read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}"
+                    + f"You can read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
                 )
                 warned_user = True
         except (APIError, Timeout) as e:
@@ -184,11 +185,10 @@ def create_chat_completion(
                 raise
             if attempt == num_retries - 1:
                 raise
-            if cfg.debug_mode:
-                print(
-                    f"{Fore.RED}Error: ",
-                    f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
-                )
+            logger.debug(
+                f"{Fore.RED}Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
+            )
             time.sleep(backoff)
     if response is None:
         logger.typewriter_log(
@@ -210,6 +210,23 @@ def create_chat_completion(
     return resp


+def batched(iterable, n):
+    """Batch data into tuples of length n. The last batch may be shorter."""
+    # batched('ABCDEFG', 3) --> ABC DEF G
+    if n < 1:
+        raise ValueError("n must be at least one")
+    it = iter(iterable)
+    while batch := tuple(islice(it, n)):
+        yield batch
+
+
+def chunked_tokens(text, tokenizer_name, chunk_length):
+    tokenizer = tiktoken.get_encoding(tokenizer_name)
+    tokens = tokenizer.encode(text)
+    chunks_iterator = batched(tokens, chunk_length)
+    yield from chunks_iterator
+
+
 def get_ada_embedding(text: str) -> List[float]:
     """Get an embedding from the ada model.
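batched() splits any iterable into fixed-size tuples, with a possibly shorter final tuple, and chunked_tokens() applies it to a tiktoken token stream so that long inputs can be embedded piecewise. A quick check of both, assuming only that tiktoken is installed and provides the cl100k_base encoding:

    from itertools import islice

    import tiktoken

    def batched(iterable, n):
        # Batch data into tuples of length n; the last batch may be shorter.
        if n < 1:
            raise ValueError("n must be at least one")
        it = iter(iterable)
        while batch := tuple(islice(it, n)):
            yield batch

    print(list(batched("ABCDEFG", 3)))
    # [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]

    tokenizer = tiktoken.get_encoding("cl100k_base")
    tokens = tokenizer.encode("hello world " * 100)
    print([len(chunk) for chunk in batched(tokens, 64)])
    # e.g. [64, 64, 64, 8] -- no chunk ever exceeds the requested length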
@@ -220,7 +237,7 @@ def get_ada_embedding(text: str) -> List[float]:
         List[float]: The embedding.
     """
     cfg = Config()
-    model = "text-embedding-ada-002"
+    model = cfg.embedding_model
     text = text.replace("\n", " ")

     if cfg.use_azure:
@@ -229,13 +246,7 @@ def get_ada_embedding(text: str) -> List[float]:
         kwargs = {"model": model}

     embedding = create_embedding(text, **kwargs)
-    api_manager = ApiManager()
-    api_manager.update_cost(
-        prompt_tokens=embedding.usage.prompt_tokens,
-        completion_tokens=0,
-        model=model,
-    )
-    return embedding["data"][0]["embedding"]
+    return embedding


 @retry_openai_api()
@@ -254,8 +265,31 @@ def create_embedding(
         openai.Embedding: The embedding object.
     """
     cfg = Config()
-    return openai.Embedding.create(
-        input=[text],
-        api_key=cfg.openai_api_key,
-        **kwargs,
-    )
+    chunk_embeddings = []
+    chunk_lengths = []
+    for chunk in chunked_tokens(
+        text,
+        tokenizer_name=cfg.embedding_tokenizer,
+        chunk_length=cfg.embedding_token_limit,
+    ):
+        embedding = openai.Embedding.create(
+            input=[chunk],
+            api_key=cfg.openai_api_key,
+            **kwargs,
+        )
+        api_manager = ApiManager()
+        api_manager.update_cost(
+            prompt_tokens=embedding.usage.prompt_tokens,
+            completion_tokens=0,
+            model=cfg.embedding_model,
+        )
+        chunk_embeddings.append(embedding["data"][0]["embedding"])
+        chunk_lengths.append(len(chunk))
+
+    # do weighted avg
+    chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lengths)
+    chunk_embeddings = chunk_embeddings / np.linalg.norm(
+        chunk_embeddings
+    )  # normalize the length to one
+    chunk_embeddings = chunk_embeddings.tolist()
+    return chunk_embeddings
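The rewritten create_embedding no longer assumes the text fits in a single request: each token chunk is embedded and billed separately, and the chunk vectors are then combined by a token-count-weighted average and rescaled to unit length. The combination step in isolation, on toy two-dimensional vectors (illustrative numbers only):

    import numpy as np

    chunk_embeddings = [[1.0, 0.0], [0.0, 1.0]]  # fake per-chunk embeddings
    chunk_lengths = [100, 50]                    # tokens per chunk

    avg = np.average(chunk_embeddings, axis=0, weights=chunk_lengths)
    unit = avg / np.linalg.norm(avg)  # normalize the length to one
    print(unit)  # [0.894... 0.447...] -- the longer chunk dominates the direction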
@@ -3,5 +3,8 @@ COSTS = {
     "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002},
-    "gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
+    "gpt-4": {"prompt": 0.03, "completion": 0.06},
+    "gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
+    "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
+    "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
     "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
 }
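These look like per-1,000-token prices in USD, which is how ApiManager.update_cost (changed earlier in this diff) appears to consume them. A sketch of that arithmetic under the per-1k assumption:

    COSTS = {
        "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002},
        "gpt-4": {"prompt": 0.03, "completion": 0.06},
    }

    def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
        # Assumed convention: rates are USD per 1,000 tokens.
        rates = COSTS[model]
        return (
            prompt_tokens * rates["prompt"] + completion_tokens * rates["completion"]
        ) / 1000

    print(estimate_cost("gpt-4", prompt_tokens=1500, completion_tokens=500))  # 0.075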
0   autogpt/log_cycle/__init__.py  Normal file
20  autogpt/log_cycle/json_handler.py  Normal file
@@ -0,0 +1,20 @@
+import json
+import logging
+
+
+class JsonFileHandler(logging.FileHandler):
+    def __init__(self, filename, mode="a", encoding=None, delay=False):
+        super().__init__(filename, mode, encoding, delay)
+
+    def emit(self, record):
+        json_data = json.loads(self.format(record))
+        with open(self.baseFilename, "w", encoding="utf-8") as f:
+            json.dump(json_data, f, ensure_ascii=False, indent=4)
+
+
+import logging
+
+
+class JsonFormatter(logging.Formatter):
+    def format(self, record):
+        return record.msg
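Note the unusual emit(): instead of appending a line, it re-parses the formatted record and rewrites the whole file, so the target always holds exactly one pretty-printed JSON document, the latest record. JsonFormatter passes record.msg through untouched, so callers are expected to hand it an already serialised JSON string, as Logger.log_json does later in this diff. A hypothetical stand-alone use, with the file name invented for illustration:

    import json
    import logging

    demo = logging.getLogger("json_demo")
    handler = JsonFileHandler("cycle.json")  # class defined above
    handler.setFormatter(JsonFormatter())
    demo.addHandler(handler)

    demo.error(json.dumps({"step": 1, "action": "browse_website"}))
    # cycle.json now contains the pretty-printed object, not an appended log line.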
83  autogpt/log_cycle/log_cycle.py  Normal file
@@ -0,0 +1,83 @@
+import json
+import os
+from typing import Any, Dict, Union
+
+from autogpt.logs import logger
+
+DEFAULT_PREFIX = "agent"
+FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
+CURRENT_CONTEXT_FILE_NAME = "current_context.json"
+NEXT_ACTION_FILE_NAME = "next_action.json"
+PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
+SUMMARY_FILE_NAME = "summary.txt"
+USER_INPUT_FILE_NAME = "user_input.txt"
+
+
+class LogCycleHandler:
+    """
+    A class for logging cycle data.
+    """
+
+    def __init__(self):
+        self.log_count_within_cycle = 0
+
+    @staticmethod
+    def create_directory_if_not_exists(directory_path: str) -> None:
+        if not os.path.exists(directory_path):
+            os.makedirs(directory_path, exist_ok=True)
+
+    def create_outer_directory(self, ai_name: str, created_at: str) -> str:
+        log_directory = logger.get_log_directory()
+
+        if os.environ.get("OVERWRITE_DEBUG") == "1":
+            outer_folder_name = "auto_gpt"
+        else:
+            ai_name_short = ai_name[:15] if ai_name else DEFAULT_PREFIX
+            outer_folder_name = f"{created_at}_{ai_name_short}"
+
+        outer_folder_path = os.path.join(log_directory, "DEBUG", outer_folder_name)
+        self.create_directory_if_not_exists(outer_folder_path)
+
+        return outer_folder_path
+
+    def create_inner_directory(self, outer_folder_path: str, cycle_count: int) -> str:
+        nested_folder_name = str(cycle_count).zfill(3)
+        nested_folder_path = os.path.join(outer_folder_path, nested_folder_name)
+        self.create_directory_if_not_exists(nested_folder_path)
+
+        return nested_folder_path
+
+    def create_nested_directory(
+        self, ai_name: str, created_at: str, cycle_count: int
+    ) -> str:
+        outer_folder_path = self.create_outer_directory(ai_name, created_at)
+        nested_folder_path = self.create_inner_directory(outer_folder_path, cycle_count)
+
+        return nested_folder_path
+
+    def log_cycle(
+        self,
+        ai_name: str,
+        created_at: str,
+        cycle_count: int,
+        data: Union[Dict[str, Any], Any],
+        file_name: str,
+    ) -> None:
+        """
+        Log cycle data to a JSON file.
+
+        Args:
+            data (Any): The data to be logged.
+            file_name (str): The name of the file to save the logged data.
+        """
+        nested_folder_path = self.create_nested_directory(
+            ai_name, created_at, cycle_count
+        )
+
+        json_data = json.dumps(data, ensure_ascii=False, indent=4)
+        log_file_path = os.path.join(
+            nested_folder_path, f"{self.log_count_within_cycle}_{file_name}"
+        )
+
+        logger.log_json(json_data, log_file_path)
+        self.log_count_within_cycle += 1
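Taken together, the directory helpers lay debug output out as <log dir>/DEBUG/<created_at>_<ai_name>/<zero-padded cycle>/<n>_<file name>, with n counting log files within one cycle. A sketch of how the call sites added elsewhere in this diff drive it; all concrete values are invented for illustration:

    handler = LogCycleHandler()
    handler.log_cycle(
        ai_name="CMOGPT",
        created_at="20230519_120000",
        cycle_count=3,
        data={"role": "system", "content": "..."},
        file_name="current_context.json",  # CURRENT_CONTEXT_FILE_NAME
    )
    # -> writes .../DEBUG/20230519_120000_CMOGPT/003/0_current_context.json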
@@ -5,12 +5,13 @@ import random
 import re
 import time
 from logging import LogRecord
+from typing import Any

 from colorama import Fore, Style

+from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
 from autogpt.singleton import Singleton
 from autogpt.speech import say_text
-from autogpt.utils import send_chat_message_to_user


 class Logger(metaclass=Singleton):
@@ -75,7 +76,13 @@ class Logger(metaclass=Singleton):
         self.logger.addHandler(error_handler)
         self.logger.setLevel(logging.DEBUG)

+        self.json_logger = logging.getLogger("JSON_LOGGER")
+        self.json_logger.addHandler(self.file_handler)
+        self.json_logger.addHandler(error_handler)
+        self.json_logger.setLevel(logging.DEBUG)
+
         self.speak_mode = False
+        self.chat_plugins = []

     def typewriter_log(
         self, title="", title_color="", content="", speak_text=False, level=logging.INFO
@@ -83,7 +90,8 @@ class Logger(metaclass=Singleton):
         if speak_text and self.speak_mode:
             say_text(f"{title}. {content}")

-        send_chat_message_to_user(f"{title}. {content}")
+        for plugin in self.chat_plugins:
+            plugin.report(f"{title}. {content}")

         if content:
             if isinstance(content, list):
@@ -103,6 +111,14 @@ class Logger(metaclass=Singleton):
     ):
         self._log(title, title_color, message, logging.DEBUG)

+    def info(
+        self,
+        message,
+        title="",
+        title_color="",
+    ):
+        self._log(title, title_color, message, logging.INFO)
+
     def warn(
         self,
         message,
@@ -114,11 +130,19 @@ class Logger(metaclass=Singleton):
     def error(self, title, message=""):
         self._log(title, Fore.RED, message, logging.ERROR)

-    def _log(self, title="", title_color="", message="", level=logging.INFO):
+    def _log(
+        self,
+        title: str = "",
+        title_color: str = "",
+        message: str = "",
+        level=logging.INFO,
+    ):
         if message:
             if isinstance(message, list):
                 message = " ".join(message)
-        self.logger.log(level, message, extra={"title": title, "color": title_color})
+        self.logger.log(
+            level, message, extra={"title": str(title), "color": str(title_color)}
+        )

     def set_level(self, level):
         self.logger.setLevel(level)
@@ -135,6 +159,26 @@ class Logger(metaclass=Singleton):

         self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)

+    def log_json(self, data: Any, file_name: str) -> None:
+        # Define log directory
+        this_files_dir_path = os.path.dirname(__file__)
+        log_dir = os.path.join(this_files_dir_path, "../logs")
+
+        # Create a handler for JSON files
+        json_file_path = os.path.join(log_dir, file_name)
+        json_data_handler = JsonFileHandler(json_file_path)
+        json_data_handler.setFormatter(JsonFormatter())
+
+        # Log the JSON data using the custom file handler
+        self.json_logger.addHandler(json_data_handler)
+        self.json_logger.debug(data)
+        self.json_logger.removeHandler(json_data_handler)
+
+    def get_log_directory(self):
+        this_files_dir_path = os.path.dirname(__file__)
+        log_dir = os.path.join(this_files_dir_path, "../logs")
+        return os.path.abspath(log_dir)
+

 """
 Output stream to console using simulated typing
@@ -182,12 +226,16 @@ class AutoGptFormatter(logging.Formatter):
         if hasattr(record, "color"):
             record.title_color = (
                 getattr(record, "color")
-                + getattr(record, "title")
+                + getattr(record, "title", "")
                 + " "
                 + Style.RESET_ALL
             )
         else:
-            record.title_color = getattr(record, "title")
+            record.title_color = getattr(record, "title", "")
+
+        # Add this line to set 'title' to an empty string if it doesn't exist
+        record.title = getattr(record, "title", "")

         if hasattr(record, "msg"):
             record.message_no_color = remove_color_codes(getattr(record, "msg"))
         else:
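With these additions the Logger gains a proper info() level and a JSON channel alongside typewriter_log. The new call signatures used throughout the rest of this changeset, shown on the module's singleton (arguments illustrative):

    from colorama import Fore

    logger.info("Text length: 4096 characters")            # message only
    logger.info("Bulletin line one", "NEWS:", Fore.GREEN)  # message, title, colour
    logger.log_json('{"cycle": 1}', "001_current_context.json")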
@@ -3,7 +3,7 @@ import logging
 import sys
 from pathlib import Path

-from colorama import Fore
+from colorama import Fore, Style

 from autogpt.agent.agent import Agent
 from autogpt.commands.command import CommandRegistry
@@ -13,7 +13,11 @@ from autogpt.logs import logger
 from autogpt.memory import get_memory
 from autogpt.plugins import scan_plugins
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
-from autogpt.utils import get_current_git_branch, get_latest_bulletin
+from autogpt.utils import (
+    get_current_git_branch,
+    get_latest_bulletin,
+    markdown_to_ansi_style,
+)
 from autogpt.workspace import Workspace
 from scripts.install_plugin_deps import install_plugin_dependencies

@@ -57,9 +61,19 @@ def run_auto_gpt(
     )

     if not cfg.skip_news:
-        motd = get_latest_bulletin()
+        motd, is_new_motd = get_latest_bulletin()
         if motd:
-            logger.typewriter_log("NEWS: ", Fore.GREEN, motd)
+            motd = markdown_to_ansi_style(motd)
+            for motd_line in motd.split("\n"):
+                logger.info(motd_line, "NEWS:", Fore.GREEN)
+            if is_new_motd and not cfg.chat_messages_enabled:
+                input(
+                    Fore.MAGENTA
+                    + Style.BRIGHT
+                    + "NEWS: Bulletin was updated! Press Enter to continue..."
+                    + Style.RESET_ALL
+                )

     git_branch = get_current_git_branch()
     if git_branch and git_branch != "stable":
         logger.typewriter_log(
@@ -104,18 +118,33 @@ def run_auto_gpt(
     cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
     # Create a CommandRegistry instance and scan default folder
     command_registry = CommandRegistry()
-    command_registry.import_commands("autogpt.commands.analyze_code")
-    command_registry.import_commands("autogpt.commands.audio_text")
-    command_registry.import_commands("autogpt.commands.execute_code")
-    command_registry.import_commands("autogpt.commands.file_operations")
-    command_registry.import_commands("autogpt.commands.git_operations")
-    command_registry.import_commands("autogpt.commands.google_search")
-    command_registry.import_commands("autogpt.commands.image_gen")
-    command_registry.import_commands("autogpt.commands.improve_code")
-    command_registry.import_commands("autogpt.commands.twitter")
-    command_registry.import_commands("autogpt.commands.web_selenium")
-    command_registry.import_commands("autogpt.commands.write_tests")
-    command_registry.import_commands("autogpt.app")
+
+    command_categories = [
+        "autogpt.commands.analyze_code",
+        "autogpt.commands.audio_text",
+        "autogpt.commands.execute_code",
+        "autogpt.commands.file_operations",
+        "autogpt.commands.git_operations",
+        "autogpt.commands.google_search",
+        "autogpt.commands.image_gen",
+        "autogpt.commands.improve_code",
+        "autogpt.commands.twitter",
+        "autogpt.commands.web_selenium",
+        "autogpt.commands.write_tests",
+        "autogpt.app",
+        "autogpt.commands.task_statuses",
+    ]
+    logger.debug(
+        f"The following command categories are disabled: {cfg.disabled_command_categories}"
+    )
+    command_categories = [
+        x for x in command_categories if x not in cfg.disabled_command_categories
+    ]
+
+    logger.debug(f"The following command categories are enabled: {command_categories}")
+
+    for command_category in command_categories:
+        command_registry.import_commands(command_category)

     ai_name = ""
     ai_config = construct_main_ai_config()
@@ -125,6 +154,13 @@ def run_auto_gpt(
     full_message_history = []
     next_action_count = 0

+    # add chat plugins capable of report to logger
+    if cfg.chat_messages_enabled:
+        for plugin in cfg.plugins:
+            if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
+                logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
+                logger.chat_plugins.append(plugin)
+
     # Initialize memory and make sure it is empty.
     # this is particularly important for indexing and referencing pinecone memory
     memory = get_memory(cfg, init=True)
@@ -1,3 +1,4 @@
+from autogpt.logs import logger
 from autogpt.memory.local import LocalCache
 from autogpt.memory.no_memory import NoMemory

@@ -10,7 +11,6 @@ try:

     supported_memory.append("redis")
 except ImportError:
-    # print("Redis not installed. Skipping import.")
     RedisMemory = None

 try:
@@ -18,7 +18,6 @@ try:

     supported_memory.append("pinecone")
 except ImportError:
-    # print("Pinecone not installed. Skipping import.")
     PineconeMemory = None

 try:
@@ -26,7 +25,6 @@ try:

     supported_memory.append("weaviate")
 except ImportError:
-    # print("Weaviate not installed. Skipping import.")
     WeaviateMemory = None

 try:
@@ -34,7 +32,6 @@ try:

     supported_memory.append("milvus")
 except ImportError:
-    # print("pymilvus not installed. Skipping import.")
     MilvusMemory = None


@@ -42,7 +39,7 @@ def get_memory(cfg, init=False):
     memory = None
     if cfg.memory_backend == "pinecone":
         if not PineconeMemory:
-            print(
+            logger.warn(
                 "Error: Pinecone is not installed. Please install pinecone"
                 " to use Pinecone as a memory backend."
             )
@@ -52,7 +49,7 @@ def get_memory(cfg, init=False):
             memory.clear()
     elif cfg.memory_backend == "redis":
         if not RedisMemory:
-            print(
+            logger.warn(
                 "Error: Redis is not installed. Please install redis-py to"
                 " use Redis as a memory backend."
             )
@@ -60,7 +57,7 @@ def get_memory(cfg, init=False):
         memory = RedisMemory(cfg)
     elif cfg.memory_backend == "weaviate":
         if not WeaviateMemory:
-            print(
+            logger.warn(
                 "Error: Weaviate is not installed. Please install weaviate-client to"
                 " use Weaviate as a memory backend."
             )
@@ -68,7 +65,7 @@ def get_memory(cfg, init=False):
         memory = WeaviateMemory(cfg)
     elif cfg.memory_backend == "milvus":
         if not MilvusMemory:
-            print(
+            logger.warn(
                 "Error: pymilvus sdk is not installed."
                 "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
             )
@@ -32,12 +32,15 @@ class PineconeMemory(MemoryProviderSingleton):
             logger.double_check(
                 "Please ensure you have setup and configured Pinecone properly for use."
                 + f"You can check out {Fore.CYAN + Style.BRIGHT}"
-                "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup"
+                "https://docs.agpt.co/configuration/memory/#pinecone-api-key-setup"
                 f"{Style.RESET_ALL} to ensure you've set up everything correctly."
             )
             exit(1)

         if table_name not in pinecone.list_indexes():
+            logger.typewriter_log(
+                "Connecting Pinecone. This may take some time...", Fore.MAGENTA, ""
+            )
             pinecone.create_index(
                 table_name, dimension=dimension, metric=metric, pod_type=pod_type
             )

@@ -58,7 +58,7 @@ class RedisMemory(MemoryProviderSingleton):
             logger.double_check(
                 "Please ensure you have setup and configured Redis properly for use. "
                 + f"You can check out {Fore.CYAN + Style.BRIGHT}"
-                f"https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL}"
+                f"https://docs.agpt.co/configuration/memory/#redis-setup{Style.RESET_ALL}"
                 " to ensure you've set up everything correctly."
             )
             exit(1)
@@ -73,7 +73,7 @@ class RedisMemory(MemoryProviderSingleton):
                 ),
             )
         except Exception as e:
-            print("Error creating Redis search index: ", e)
+            logger.warn("Error creating Redis search index: ", e)
         existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
         self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0

@@ -145,7 +145,7 @@ class RedisMemory(MemoryProviderSingleton):
                 query, query_params={"vector": query_vector}
             )
         except Exception as e:
-            print("Error calling Redis search: ", e)
+            logger.warn("Error calling Redis search: ", e)
             return None
         return [result.data for result in results.docs]
@@ -4,6 +4,7 @@ from weaviate.embedded import EmbeddedOptions
 from weaviate.util import generate_uuid5

 from autogpt.llm import get_ada_embedding
+from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton


@@ -35,7 +36,7 @@ class WeaviateMemory(MemoryProviderSingleton):
                 )
             )

-            print(
+            logger.info(
                 f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
             )
         else:
@@ -116,7 +117,7 @@ class WeaviateMemory(MemoryProviderSingleton):
             return []

         except Exception as err:
-            print(f"Unexpected error {err=}, {type(err)=}")
+            logger.warn(f"Unexpected error {err=}, {type(err)=}")
             return []

     def get_stats(self):
@@ -1,8 +1,12 @@
+import copy
 import json
 from typing import Dict, List, Tuple

+from autogpt.agent import Agent
 from autogpt.config import Config
 from autogpt.llm.llm_utils import create_chat_completion
+from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
+from autogpt.logs import logger

 cfg = Config()

@@ -44,7 +48,9 @@ def get_newly_trimmed_messages(
     return new_messages_not_in_context, new_index


-def update_running_summary(current_memory: str, new_events: List[Dict]) -> str:
+def update_running_summary(
+    agent: Agent, current_memory: str, new_events: List[Dict[str, str]]
+) -> str:
     """
     This function takes a list of dictionaries representing new events and combines them with the current summary,
     focusing on key and potentially important information to remember. The updated summary is returned in a message
@@ -61,22 +67,32 @@ def update_running_summary(current_memory: str, new_events: List[Dict]) -> str:
         update_running_summary(new_events)
         # Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
     """
+    # Create a copy of the new_events list to prevent modifying the original list
+    new_events = copy.deepcopy(new_events)
+
     # Replace "assistant" with "you". This produces much better first person past tense results.
     for event in new_events:
         if event["role"].lower() == "assistant":
             event["role"] = "you"

             # Remove "thoughts" dictionary from "content"
-            content_dict = json.loads(event["content"])
-            if "thoughts" in content_dict:
-                del content_dict["thoughts"]
-            event["content"] = json.dumps(content_dict)
+            try:
+                content_dict = json.loads(event["content"])
+                if "thoughts" in content_dict:
+                    del content_dict["thoughts"]
+                event["content"] = json.dumps(content_dict)
+            except json.decoder.JSONDecodeError:
+                if cfg.debug_mode:
+                    logger.error(f"Error: Invalid JSON: {event['content']}\n")

         elif event["role"].lower() == "system":
             event["role"] = "your computer"

         # Delete all user messages
         elif event["role"] == "user":
             new_events.remove(event)

-    # This can happen at any point during execturion, not just the beginning
+    # This can happen at any point during execution, not just the beginning
     if len(new_events) == 0:
         new_events = "Nothing new happened."

@@ -101,9 +117,24 @@ Latest Development:
             "content": prompt,
         }
     ]
+    agent.log_cycle_handler.log_cycle(
+        agent.config.ai_name,
+        agent.created_at,
+        agent.cycle_count,
+        messages,
+        PROMPT_SUMMARY_FILE_NAME,
+    )

     current_memory = create_chat_completion(messages, cfg.fast_llm_model)

+    agent.log_cycle_handler.log_cycle(
+        agent.config.ai_name,
+        agent.created_at,
+        agent.cycle_count,
+        current_memory,
+        SUMMARY_FILE_NAME,
+    )
+
     message_to_return = {
         "role": "system",
         "content": f"This reminds you of these events from your past: \n{current_memory}",
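Before summarising, update_running_summary now deep-copies the events, rewrites roles so the summary reads in the first person, strips the assistant's "thoughts" block, drops user messages, and, the actual fix here, tolerates non-JSON content instead of raising. The event rewrite in isolation on sample events (sample data only; this sketch iterates over a snapshot so removal during iteration is safe, which the original does not do):

    import copy
    import json

    events = copy.deepcopy([
        {"role": "assistant",
         "content": '{"thoughts": {"text": "hm"}, "command": {"name": "browse_website"}}'},
        {"role": "assistant", "content": "not json at all"},
        {"role": "user", "content": "hello"},
    ])

    for event in list(events):  # snapshot so events.remove() is safe
        if event["role"].lower() == "assistant":
            event["role"] = "you"
            try:
                content_dict = json.loads(event["content"])
                content_dict.pop("thoughts", None)
                event["content"] = json.dumps(content_dict)
            except json.decoder.JSONDecodeError:
                pass  # tolerated now instead of crashing the summariser
        elif event["role"] == "user":
            events.remove(event)

    print(events)
    # [{'role': 'you', 'content': '{"command": {"name": "browse_website"}}'},
    #  {'role': 'you', 'content': 'not json at all'}]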
@@ -15,6 +15,7 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate
 from openapi_python_client.cli import Config as OpenAPIConfig

 from autogpt.config import Config
+from autogpt.logs import logger
 from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin


@@ -32,12 +33,11 @@ def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
     result = []
     with zipfile.ZipFile(zip_path, "r") as zfile:
         for name in zfile.namelist():
-            if name.endswith("__init__.py"):
-                if debug:
-                    print(f"Found module '{name}' in the zipfile at: {name}")
+            if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
+                logger.debug(f"Found module '{name}' in the zipfile at: {name}")
                 result.append(name)
-    if debug and len(result) == 0:
-        print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
+    if len(result) == 0:
+        logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
     return result


@@ -71,12 +71,12 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
             if response.status_code == 200:
                 manifest = response.json()
                 if manifest["schema_version"] != "v1":
-                    print(
+                    logger.warn(
                         f"Unsupported manifest version: {manifest['schem_version']} for {url}"
                     )
                     continue
                 if manifest["api"]["type"] != "openapi":
-                    print(
+                    logger.warn(
                         f"Unsupported API type: {manifest['api']['type']} for {url}"
                     )
                     continue
@@ -84,11 +84,13 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
                     manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
                 )
             else:
-                print(f"Failed to fetch manifest for {url}: {response.status_code}")
+                logger.warn(
+                    f"Failed to fetch manifest for {url}: {response.status_code}"
+                )
         except requests.exceptions.RequestException as e:
-            print(f"Error while requesting manifest from {url}: {e}")
+            logger.warn(f"Error while requesting manifest from {url}: {e}")
         else:
-            print(f"Manifest for {url} already exists")
+            logger.info(f"Manifest for {url} already exists")
             manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
         if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
             openapi_spec = openapi_python_client._get_document(
@@ -98,7 +100,7 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
                 openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
             )
         else:
-            print(f"OpenAPI spec for {url} already exists")
+            logger.info(f"OpenAPI spec for {url} already exists")
             openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
         manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
     return manifests

@@ -115,13 +117,13 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
     if not os.path.exists(directory_path):
         try:
             os.makedirs(directory_path)
-            print(f"Created directory: {directory_path}")
+            logger.debug(f"Created directory: {directory_path}")
             return True
         except OSError as e:
-            print(f"Error creating directory {directory_path}: {e}")
+            logger.warn(f"Error creating directory {directory_path}: {e}")
             return False
     else:
-        print(f"Directory {directory_path} already exists")
+        logger.info(f"Directory {directory_path} already exists")
         return True


@@ -159,7 +161,7 @@ def initialize_openai_plugins(
             config=_config,
         )
         if client_results:
-            print(
+            logger.warn(
                 f"Error creating OpenAPI client: {client_results[0].header} \n"
                 f" details: {client_results[0].detail}"
             )
@@ -207,13 +209,16 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
     loaded_plugins = []
     # Generic plugins
     plugins_path_path = Path(cfg.plugins_dir)

+    logger.debug(f"Allowlisted Plugins: {cfg.plugins_allowlist}")
+    logger.debug(f"Denylisted Plugins: {cfg.plugins_denylist}")
+
     for plugin in plugins_path_path.glob("*.zip"):
         if moduleList := inspect_zip_for_modules(str(plugin), debug):
             for module in moduleList:
                 plugin = Path(plugin)
                 module = Path(module)
-                if debug:
-                    print(f"Plugin: {plugin} Module: {module}")
+                logger.debug(f"Plugin: {plugin} Module: {module}")
                 zipped_package = zipimporter(str(plugin))
                 zipped_module = zipped_package.load_module(str(module.parent))
                 for key in dir(zipped_module):
@@ -240,9 +245,9 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
                 loaded_plugins.append(plugin)

     if loaded_plugins:
-        print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
+        logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
     for plugin in loaded_plugins:
-        print(f"{plugin._name}: {plugin._version} - {plugin._description}")
+        logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
     return loaded_plugins


@@ -256,9 +261,12 @@ def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
     Returns:
         True or False
     """
+    logger.debug(f"Checking if plugin {plugin_name} should be loaded")
     if plugin_name in cfg.plugins_denylist:
+        logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.")
         return False
     if plugin_name in cfg.plugins_allowlist:
+        logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.")
         return True
     ack = input(
         f"WARNING: Plugin {plugin_name} found. But not in the"
@@ -6,6 +6,7 @@ from selenium.webdriver.remote.webdriver import WebDriver

 from autogpt.config import Config
 from autogpt.llm import count_message_tokens, create_chat_completion
+from autogpt.logs import logger
 from autogpt.memory import get_memory

 CFG = Config()
@@ -29,10 +30,10 @@ def split_text(
     Raises:
         ValueError: If the text is longer than the maximum length
     """
-    flatened_paragraphs = " ".join(text.split("\n"))
+    flattened_paragraphs = " ".join(text.split("\n"))
     nlp = spacy.load(CFG.browse_spacy_language_model)
     nlp.add_pipe("sentencizer")
-    doc = nlp(flatened_paragraphs)
+    doc = nlp(flattened_paragraphs)
     sentences = [sent.text.strip() for sent in doc.sents]

     current_chunk = []
@@ -86,7 +87,7 @@ def summarize_text(

     model = CFG.fast_llm_model
     text_length = len(text)
-    print(f"Text length: {text_length} characters")
+    logger.info(f"Text length: {text_length} characters")

     summaries = []
     chunks = list(
@@ -99,7 +100,7 @@ def summarize_text(
     for i, chunk in enumerate(chunks):
         if driver:
             scroll_to_percentage(driver, scroll_ratio * i)
-        print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
+        logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory")

         memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"

@@ -108,7 +109,7 @@ def summarize_text(

         messages = [create_message(chunk, question)]
         tokens_for_chunk = count_message_tokens(messages, model)
-        print(
+        logger.info(
             f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
         )

@@ -117,7 +118,7 @@ def summarize_text(
             messages=messages,
         )
         summaries.append(summary)
-        print(
+        logger.info(
             f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters"
         )

@@ -125,7 +126,7 @@ def summarize_text(

         memory.add(memory_to_add)

-    print(f"Summarized {len(chunks)} chunks.")
+    logger.info(f"Summarized {len(chunks)} chunks.")

     combined_summary = "\n".join(summaries)
     messages = [create_message(combined_summary, question)]
29  autogpt/prompts/default_prompts.py  Normal file
@@ -0,0 +1,29 @@
+#########################Setup.py#################################
+
+DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC = """
+Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.
+
+The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.
+
+Example input:
+Help me with marketing my business
+
+Example output:
+Name: CMOGPT
+Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
+Goals:
+- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.
+
+- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.
+
+- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.
+
+- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
+"""
+
+DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC = (
+    "Task: '{{user_prompt}}'\n"
+    "Respond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n"
+)
+
+DEFAULT_USER_DESIRE_PROMPT = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT"  # Default prompt
@@ -41,15 +41,6 @@
         'Exclusively use the commands listed in double quotes e.g. "command name"'
     )

-    # Define the command list
-    commands = [
-        ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
-    ]
-
-    # Add commands to the PromptGenerator object
-    for command_label, command_name, args in commands:
-        prompt_generator.add_command(command_label, command_name, args)
-
     # Add resources to the PromptGenerator object
     prompt_generator.add_resource(
         "Internet access for searches and information gathering."
@@ -2,12 +2,18 @@
 import re

 from colorama import Fore, Style
+from jinja2 import Template

 from autogpt import utils
 from autogpt.config import Config
 from autogpt.config.ai_config import AIConfig
 from autogpt.llm import create_chat_completion
 from autogpt.logs import logger
+from autogpt.prompts.default_prompts import (
+    DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
+    DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
+    DEFAULT_USER_DESIRE_PROMPT,
+)

 CFG = Config()

@@ -42,7 +48,7 @@ def prompt_user() -> AIConfig:
     )

     if user_desire == "":
-        user_desire = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT"  # Default prompt
+        user_desire = DEFAULT_USER_DESIRE_PROMPT  # Default prompt

     # If user desire contains "--manual"
     if "--manual" in user_desire:
@@ -119,7 +125,7 @@ def generate_aiconfig_manual() -> AIConfig:
         "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
         " multiple businesses autonomously'",
     )
-    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
+    logger.info("Enter nothing to load defaults, enter nothing when finished.")
     ai_goals = []
     for i in range(5):
         ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
@@ -139,7 +145,7 @@ def generate_aiconfig_manual() -> AIConfig:
         Fore.GREEN,
         "For example: $1.50",
     )
-    print("Enter nothing to let the AI run without monetary limit", flush=True)
+    logger.info("Enter nothing to let the AI run without monetary limit")
     api_budget_input = utils.clean_input(
         f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
     )
@@ -164,27 +170,10 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig:
         AIConfig: The AIConfig object tailored to the user's input
     """

-    system_prompt = """
-Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.
-
-The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.
-
-Example input:
-Help me with marketing my business
-
-Example output:
-Name: CMOGPT
-Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
-Goals:
-- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.
-
-- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.
-
-- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.
-
-- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
-"""
-
+    system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC
+    prompt_ai_config_automatic = Template(
+        DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
+    ).render(user_prompt=user_prompt)
     # Call LLM with the string as user input
     messages = [
         {
@@ -193,7 +182,7 @@ Goals:
         },
         {
             "role": "user",
-            "content": f"Task: '{user_prompt}'\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n",
+            "content": prompt_ai_config_automatic,
         },
     ]
     output = create_chat_completion(messages, CFG.fast_llm_model)
@@ -69,6 +69,8 @@ class ElevenLabsSpeech(VoiceBase):
|
||||
Returns:
|
||||
bool: True if the request was successful, False otherwise
|
||||
"""
|
||||
from autogpt.logs import logger
|
||||
|
||||
tts_url = (
|
||||
f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
|
||||
)
|
||||
@@ -81,6 +83,6 @@ class ElevenLabsSpeech(VoiceBase):
|
||||
os.remove("speech.mpeg")
|
||||
return True
|
||||
else:
|
||||
print("Request failed with status code:", response.status_code)
|
||||
print("Response content:", response.content)
|
||||
logger.warn("Request failed with status code:", response.status_code)
|
||||
logger.info("Response content:", response.content)
|
||||
return False
|
||||
|
||||
@@ -7,7 +7,7 @@ from requests.compat import urljoin
|
||||
|
||||
def validate_url(func: Callable[..., Any]) -> Any:
|
||||
"""The method decorator validate_url is used to validate urls for any command that requires
|
||||
a url as an arugment"""
|
||||
a url as an argument"""
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(url: str, *args, **kwargs) -> Any:
|
||||
|
||||
@@ -1,31 +1,22 @@
|
||||
import os
|
||||
import re
|
||||
|
||||
import requests
|
||||
import yaml
|
||||
from colorama import Fore
|
||||
from colorama import Fore, Style
|
||||
from git.repo import Repo
|
||||
|
||||
from autogpt.logs import logger
|
||||
|
||||
# Use readline if available (for clean_input)
|
||||
try:
|
||||
import readline
|
||||
except:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
def send_chat_message_to_user(report: str):
|
||||
cfg = Config()
|
||||
if not cfg.chat_messages_enabled:
|
||||
return
|
||||
for plugin in cfg.plugins:
|
||||
if not hasattr(plugin, "can_handle_report"):
|
||||
continue
|
||||
if not plugin.can_handle_report():
|
||||
continue
|
||||
plugin.report(report)
|
||||
|
||||
|
||||
def clean_input(prompt: str = "", talk=False):
|
||||
try:
|
||||
cfg = Config()
|
||||
@@ -58,12 +49,12 @@ def clean_input(prompt: str = "", talk=False):
|
||||
return plugin_response
|
||||
|
||||
# ask for input, default when just pressing Enter is y
|
||||
print("Asking user via keyboard...")
|
||||
logger.info("Asking user via keyboard...")
|
||||
answer = input(prompt)
|
||||
return answer
|
||||
except KeyboardInterrupt:
|
||||
print("You interrupted Auto-GPT")
|
||||
print("Quitting...")
|
||||
logger.info("You interrupted Auto-GPT")
|
||||
logger.info("Quitting...")
|
||||
exit(0)
|
||||
|
||||
|
||||
@@ -117,15 +108,48 @@ def get_current_git_branch() -> str:
|
||||
return ""
|
||||
|
||||
|
||||
def get_latest_bulletin() -> str:
|
||||
exists = os.path.exists("CURRENT_BULLETIN.md")
|
||||
def get_latest_bulletin() -> tuple[str, bool]:
|
||||
exists = os.path.exists("data/CURRENT_BULLETIN.md")
|
||||
current_bulletin = ""
|
||||
if exists:
|
||||
current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read()
|
||||
current_bulletin = open(
|
||||
"data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
|
||||
).read()
|
||||
new_bulletin = get_bulletin_from_web()
|
||||
is_new_news = new_bulletin != current_bulletin
|
||||
is_new_news = new_bulletin != "" and new_bulletin != current_bulletin
|
||||
|
||||
news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
|
||||
if new_bulletin or current_bulletin:
|
||||
news_header += (
|
||||
"Below you'll find the latest Auto-GPT News and updates regarding features!\n"
|
||||
"If you don't wish to see this message, you "
|
||||
"can run Auto-GPT with the *--skip-news* flag.\n"
|
||||
)
|
||||
|
||||
if new_bulletin and is_new_news:
|
||||
open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
|
||||
return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}"
|
||||
return current_bulletin
|
||||
open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
|
||||
current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"
|
||||
|
||||
return f"{news_header}\n{current_bulletin}", is_new_news
|
||||
|
||||
|
||||
def markdown_to_ansi_style(markdown: str):
|
||||
ansi_lines: list[str] = []
|
||||
for line in markdown.split("\n"):
|
||||
line_style = ""
|
||||
|
||||
if line.startswith("# "):
|
||||
line_style += Style.BRIGHT
|
||||
else:
|
||||
line = re.sub(
|
||||
r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
|
||||
rf"{Style.BRIGHT}\1{Style.NORMAL}",
|
||||
line,
|
||||
)
|
||||
|
||||
if re.match(r"^#+ ", line) is not None:
|
||||
line_style += Fore.CYAN
|
||||
line = re.sub(r"^#+ ", "", line)
|
||||
|
||||
ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
|
||||
return "\n".join(ansi_lines)
|
||||
|
||||
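As a quick illustration of how the updated helpers fit together, here is a small hypothetical caller (not part of this diff) that unpacks the new `tuple[str, bool]` return value of `get_latest_bulletin` and renders the text with `markdown_to_ansi_style`. The call site, the logging, and the import path (assuming these helpers live in `autogpt.utils`) are assumptions, not code from this changeset:

```python
from autogpt.logs import logger
from autogpt.utils import get_latest_bulletin, markdown_to_ansi_style

# get_latest_bulletin() now returns the formatted news text plus a flag that
# tells the caller whether the bulletin changed since the last run.
bulletin, is_new_news = get_latest_bulletin()
if bulletin:
    # Convert the markdown bulletin into ANSI-styled terminal output.
    logger.info(markdown_to_ansi_style(bulletin))
if is_new_news:
    # Hypothetical follow-up message; the real caller may behave differently.
    logger.info("A new bulletin was saved to data/CURRENT_BULLETIN.md")
```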
0
data/.keep
Normal file
@@ -1,7 +1,7 @@
|
||||
import argparse
|
||||
import logging
|
||||
|
||||
from autogpt.commands.file_operations import ingest_file, search_files
|
||||
from autogpt.commands.file_operations import ingest_file, list_files
|
||||
from autogpt.config import Config
|
||||
from autogpt.memory import get_memory
|
||||
|
||||
@@ -10,11 +10,13 @@ cfg = Config()
|
||||
|
||||
def configure_logging():
|
||||
logging.basicConfig(
|
||||
filename="log-ingestion.txt",
|
||||
filemode="a",
|
||||
format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
|
||||
datefmt="%H:%M:%S",
|
||||
level=logging.DEBUG,
|
||||
handlers=[
|
||||
logging.FileHandler(filename="log-ingestion.txt", mode="a"),
|
||||
logging.StreamHandler(),
|
||||
],
|
||||
)
|
||||
return logging.getLogger("AutoGPT-Ingestion")
|
||||
|
||||
@@ -26,12 +28,13 @@ def ingest_directory(directory, memory, args):
|
||||
:param directory: The directory containing the files to ingest
|
||||
:param memory: An object with an add() method to store the chunks in memory
|
||||
"""
|
||||
global logger
|
||||
try:
|
||||
files = search_files(directory)
|
||||
files = list_files(directory)
|
||||
for file in files:
|
||||
ingest_file(file, memory, args.max_length, args.overlap)
|
||||
except Exception as e:
|
||||
print(f"Error while ingesting directory '{directory}': {str(e)}")
|
||||
logger.error(f"Error while ingesting directory '{directory}': {str(e)}")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
@@ -64,29 +67,26 @@ def main() -> None:
|
||||
help="The max_length of each chunk when ingesting files (default: 4000)",
|
||||
default=4000,
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Initialize memory
|
||||
memory = get_memory(cfg, init=args.init)
|
||||
print("Using memory of type: " + memory.__class__.__name__)
|
||||
logger.debug("Using memory of type: " + memory.__class__.__name__)
|
||||
|
||||
if args.file:
|
||||
try:
|
||||
ingest_file(args.file, memory, args.max_length, args.overlap)
|
||||
print(f"File '{args.file}' ingested successfully.")
|
||||
logger.info(f"File '{args.file}' ingested successfully.")
|
||||
except Exception as e:
|
||||
logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
|
||||
print(f"Error while ingesting file '{args.file}': {str(e)}")
|
||||
elif args.dir:
|
||||
try:
|
||||
ingest_directory(args.dir, memory, args)
|
||||
print(f"Directory '{args.dir}' ingested successfully.")
|
||||
logger.info(f"Directory '{args.dir}' ingested successfully.")
|
||||
except Exception as e:
|
||||
logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
|
||||
print(f"Error while ingesting directory '{args.dir}': {str(e)}")
|
||||
else:
|
||||
print(
|
||||
logger.warn(
|
||||
"Please provide either a file path (--file) or a directory name (--dir)"
|
||||
" inside the auto_gpt_workspace directory as input."
|
||||
)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# To boot the app run the following:
|
||||
# docker-compose run auto-gpt
|
||||
# NOTE: Version 3.9 requires at least docker-compose version 1.29.0 !
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
|
||||
11
docs/challenges/beat.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Beat a Challenge
|
||||
|
||||
If you have a solution or idea to tackle an existing challenge, you can contribute by working on it and submitting your solution. Here's how to get started:
|
||||
|
||||
## Guidelines for Beating a Challenge
|
||||
|
||||
1. **Choose a challenge**: Browse the [List of Challenges](list.md) and choose one that interests you or aligns with your expertise.
|
||||
|
||||
2. **Understand the problem**: Make sure you thoroughly understand the problem at hand, its scope, and the desired outcome.
|
||||
|
||||
3. **Develop a solution**: Work on creating a solution for the challenge. This may/
|
||||
135
docs/challenges/building_challenges.md
Normal file
@@ -0,0 +1,135 @@
|
||||
# Creating Challenges for AutoGPT
|
||||
|
||||
🏹 We're on the hunt for talented Challenge Creators! 🎯
|
||||
|
||||
Join us in shaping the future of Auto-GPT by designing challenges that test its limits. Your input will be invaluable in guiding our progress and ensuring that we're on the right track. We're seeking individuals with a diverse skill set, including:
|
||||
|
||||
🎨 UX Design: Your expertise will enhance the user experience for those attempting to conquer our challenges. With your help, we'll develop a dedicated section in our wiki, and potentially even launch a standalone website.
|
||||
|
||||
💻 Coding Skills: Proficiency in Python, pytest, and VCR (a library that records OpenAI calls and stores them) will be essential for creating engaging and robust challenges.
|
||||
|
||||
⚙️ DevOps Skills: Experience with CI pipelines in GitHub and possibly Google Cloud Platform will be instrumental in streamlining our operations.
|
||||
|
||||
Are you ready to play a pivotal role in Auto-GPT's journey? Apply now to become a Challenge Creator by opening a PR! 🚀
|
||||
|
||||
|
||||
# Getting Started
|
||||
Clone the original AutoGPT repo and check out the master branch
|
||||
|
||||
|
||||
The challenges are not written using a specific framework. They try to be framework-agnostic.
|
||||
The challenges act like a user that wants something done:
|
||||
INPUT:
|
||||
- User desire
|
||||
- Files, other inputs
|
||||
|
||||
Output => Artifact (files, images, code, etc.)
|
||||
|
||||
## Defining your Agent
|
||||
|
||||
Go to https://github.com/Significant-Gravitas/Auto-GPT/blob/master/tests/integration/agent_factory.py
|
||||
|
||||
Create your agent fixture.
|
||||
|
||||
```python
|
||||
def kubernetes_agent(
|
||||
agent_test_config, memory_local_cache, workspace: Workspace
|
||||
):
|
||||
# Please choose the commands your agent will need to beat the challenges; the full list is available in main.py
|
||||
# (we're working on a better way to design this; for now you have to look at main.py)
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
|
||||
# Define all the settings of our challenged agent
|
||||
ai_config = AIConfig(
|
||||
ai_name="Kubernetes",
|
||||
ai_role="an autonomous agent that specializes in creating Kubernetes deployment templates.",
|
||||
ai_goals=[
|
||||
"Write a simple kubernetes deployment file and save it as a kube.yaml.",
|
||||
],
|
||||
)
|
||||
ai_config.command_registry = command_registry
|
||||
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
Config().set_continuous_mode(False)
|
||||
agent = Agent(
|
||||
# We also give the AI a name
|
||||
ai_name="Kubernetes-Demo",
|
||||
memory=memory_local_cache,
|
||||
full_message_history=[],
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
next_action_count=0,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace.root,
|
||||
)
|
||||
|
||||
return agent
|
||||
```
|
||||
|
||||
## Creating your challenge
|
||||
Go to `tests/integration/challenges` and create a file that is called `test_your_test_description.py` and add it to the appropriate folder. If no category exists you can create a new one.
|
||||
|
||||
Your test could look something like this:
|
||||
|
||||
```python
|
||||
import contextlib
|
||||
from functools import wraps
|
||||
from typing import Generator
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from tests.integration.agent_utils import run_interaction_loop
|
||||
from tests.integration.challenges.utils import run_multiple_times
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
|
||||
def input_generator(input_sequence: list) -> Generator[str, None, None]:
|
||||
"""
|
||||
Creates a generator that yields input strings from the given sequence.
|
||||
|
||||
:param input_sequence: A list of input strings.
|
||||
:return: A generator that yields input strings.
|
||||
"""
|
||||
yield from input_sequence
|
||||
|
||||
|
||||
@pytest.mark.skip("This challenge hasn't been beaten yet.")
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@run_multiple_times(3)
|
||||
def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> None:
|
||||
"""
|
||||
Test the challenge_a function in a given agent by mocking user inputs
|
||||
and checking the output file content.
|
||||
|
||||
:param kubernetes_agent: The agent to test.
|
||||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
"""
|
||||
input_sequence = ["s", "s", "s", "s", "s", "EXIT"]
|
||||
gen = input_generator(input_sequence)
|
||||
monkeypatch.setattr("builtins.input", lambda _: next(gen))
|
||||
|
||||
with contextlib.suppress(SystemExit):
|
||||
run_interaction_loop(kubernetes_agent, None)
|
||||
|
||||
# here we load the output file
|
||||
file_path = str(kubernetes_agent.workspace.get_path("kube.yaml"))
|
||||
content = read_file(file_path)
|
||||
|
||||
# then we check if it's including keywords from the kubernetes deployment config
|
||||
for word in ["apiVersion", "kind", "metadata", "spec"]:
|
||||
assert word in content, f"Expected the file to contain {word}"
|
||||
|
||||
content = yaml.safe_load(content)
|
||||
for word in ["Service", "Deployment", "Pod"]:
|
||||
assert word in content["kind"], f"Expected the file to contain {word}"
|
||||
|
||||
|
||||
```
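Once your test file is in place, you can run it on its own the same way the existing challenges are run, e.g. `pytest -s tests/integration/challenges/<category>/test_your_test_description.py` (the category folder and file name shown here are placeholders for whatever you actually created).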
|
||||
|
||||
|
||||
24
docs/challenges/challenge_template.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Challenge Title
|
||||
|
||||
## Description
|
||||
|
||||
Provide a clear and concise description of the challenge. Include any relevant examples or files to illustrate the problem.
|
||||
|
||||
## Input
|
||||
|
||||
If the challenge involves specific input files, describe them here. Provide the file names and their contents, if necessary. Use triple backticks (```) to format the content as a code block.
|
||||
|
||||
For example:
|
||||
|
||||
instructions_1.txt
|
||||
|
||||
The current task_id is 4563.\n[NOISE intended to confuse the agent]
|
||||
Read the file instructions_2.txt using the read_file command.
|
||||
|
||||
## Scope
|
||||
|
||||
Define the scope of the challenge, including any relevant constraints, requirements, or limitations.
|
||||
|
||||
## Success Evaluation
|
||||
|
||||
Explain how success will be measured or evaluated for the challenge. This helps others understand what the desired outcome is and how to work towards it.
|
||||
20
docs/challenges/information_retrieval/challenge_a.md
Normal file
@@ -0,0 +1,20 @@
|
||||
# Information Retrieval Challenge A
|
||||
|
||||
**Status**: Current level to beat: level 1
|
||||
|
||||
**Command to try**:
|
||||
|
||||
```
|
||||
pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
The agent's goal is to find the revenue of Tesla in 2022.
|
||||
|
||||
It should write the result in a file called output.txt.
|
||||
|
||||
The agent should be able to beat this test consistently (this is the hardest part).
|
||||
## Objective
|
||||
|
||||
The objective of this challenge is to test the agent's ability to retrieve information in a consistent way.
|
||||
3
docs/challenges/information_retrieval/introduction.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Information Retrieval
|
||||
|
||||
Information retrieval challenges are designed to evaluate the proficiency of an AI agent, such as Auto-GPT, in searching, extracting, and presenting relevant information from a vast array of sources. These challenges often encompass tasks such as interpreting user queries, browsing the web, and filtering through unstructured data.
|
||||
23
docs/challenges/introduction.md
Normal file
@@ -0,0 +1,23 @@
|
||||
|
||||
# Introduction to Challenges
|
||||
|
||||
Welcome to the Auto-GPT Challenges page! This is a space where we encourage community members to collaborate and contribute towards improving Auto-GPT by identifying and solving challenges that Auto-GPT is not yet able to achieve.
|
||||
|
||||
## What are challenges?
|
||||
|
||||
Challenges are tasks or problems that Auto-GPT has difficulty solving or has not yet been able to accomplish. These may include improving specific functionalities, enhancing the model's understanding of specific domains, or even developing new features that the current version of Auto-GPT lacks.
|
||||
|
||||
## Why are challenges important?
|
||||
|
||||
Addressing challenges helps us improve Auto-GPT's performance, usability, and versatility. By working together to tackle these challenges, we can create a more powerful and efficient tool for everyone. It also allows the community to actively contribute to the project, making it a true open-source effort.
|
||||
|
||||
## How can you participate?
|
||||
|
||||
There are two main ways to get involved with challenges:
|
||||
|
||||
1. **Submit a Challenge**: If you have identified a task that Auto-GPT struggles with, you can submit it as a challenge. This allows others to see the issue and collaborate on finding a solution.
|
||||
2. **Beat a Challenge**: If you have a solution or idea to tackle an existing challenge, you can contribute by working on the challenge and submitting your solution.
|
||||
|
||||
To learn more about submitting and beating challenges, please visit the [List of Challenges](list.md), [Submit a Challenge](submit.md), and [Beat a Challenge](beat.md) pages.
|
||||
|
||||
We look forward to your contributions and the exciting solutions that the community will develop together to make Auto-GPT even better!
|
||||
5
docs/challenges/list.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# List of Challenges
|
||||
|
||||
This page contains a curated list of challenges that Auto-GPT currently faces. If you think you have a solution or idea to tackle any of these challenges, feel free to dive in and start working on them! New challenges can also be submitted by following the guidelines on the [Submit a Challenge](submit.md) page.
|
||||
|
||||
Memory Challenges: [List of Challenges](memory/introduction.md)
|
||||
39
docs/challenges/memory/challenge_a.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# Memory Challenge A
|
||||
|
||||
**Status**: Challenge Completed
|
||||
|
||||
|
||||
## Description
|
||||
|
||||
The agent, Follow-Instructions-GPT, has the following goals:
|
||||
|
||||
- Use the command `read_file` to read the `instructions_1.txt` file.
|
||||
- Follow the instructions in the `instructions_1.txt` file.
|
||||
|
||||
The challenge involves a series of text files containing instructions. The agent must follow the instructions and keep track of the task ID as it progresses through the files.
|
||||
|
||||
## Files
|
||||
|
||||
- `instructions_1.txt`
|
||||
|
||||
The task_id is 1234
|
||||
Read the file instructions_1.txt
|
||||
|
||||
|
||||
|
||||
- `instructions_2.txt`
|
||||
|
||||
Read the file instructions_2.txt using the read_file command.
|
||||
|
||||
|
||||
...and so on.
|
||||
|
||||
- `instructions_n.txt`
|
||||
|
||||
Write the task_id into the file output.txt.
|
||||
|
||||
|
||||
## Objective
|
||||
|
||||
The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task ID throughout the process. The agent successfully completes this challenge if it writes the task ID into a file.
|
||||
|
||||
44
docs/challenges/memory/challenge_b.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Memory Challenge B
|
||||
|
||||
**Status**: Current level to beat: level 3
|
||||
|
||||
**Command to try**:
|
||||
```
|
||||
pytest -s tests/integration/challenges/memory/test_memory_challenge_b.py --level=3
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
The agent, Follow-Instructions-GPT, has the following goals:
|
||||
|
||||
- Use the command `read_file` to read the `instructions_1.txt` file.
|
||||
- Follow the instructions in the `instructions_1.txt` file.
|
||||
|
||||
The challenge involves a series of text files containing instructions and task IDs. The agent must follow the instructions and keep track of the task IDs as it progresses through the files.
|
||||
|
||||
## Files
|
||||
|
||||
- `instructions_1.txt`
|
||||
|
||||
The current task_id is 4563.\n[NOISE intended to confuse the agent]
|
||||
Read the file instructions_2.txt using the read_file command.
|
||||
|
||||
|
||||
- `instructions_2.txt`
|
||||
|
||||
The current task_id is 6182.\n[NOISE intended to confuse the agent]
|
||||
Read the file instructions_3.txt using the read_file command.
|
||||
|
||||
|
||||
...and so on.
|
||||
|
||||
- `instructions_n.txt`
|
||||
|
||||
The current task_id is 8912.
|
||||
Write all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.
|
||||
|
||||
|
||||
## Objective
|
||||
|
||||
The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task IDs throughout the process. The agent successfully completes this challenge if it writes all the task IDs into a file.
|
||||
|
||||
45
docs/challenges/memory/challenge_c.md
Normal file
@@ -0,0 +1,45 @@
|
||||
# Memory Challenge C
|
||||
|
||||
**Status**: Current level to beat: level 1
|
||||
|
||||
**Command to try**:
|
||||
```
|
||||
pytest -s tests/integration/challenges/memory/test_memory_challenge_c.py --level=2
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
The agent, Follow-Instructions-GPT, has the following goals:
|
||||
|
||||
- Use the command `read_file` to read the `instructions_1.txt` file.
|
||||
- Follow the instructions in the `instructions_1.txt` file.
|
||||
|
||||
The challenge involves a series of text files containing instructions and silly phrases. The agent must follow the instructions and keep track of the phrases as it progresses through the files.
|
||||
|
||||
## Files
|
||||
|
||||
- `instructions_1.txt`
|
||||
|
||||
The current phrase is "The purple elephant danced on a rainbow while eating a taco.".\n[NOISE intended to confuse the agent]
|
||||
Read the file instructions_2.txt using the read_file command.
|
||||
|
||||
|
||||
- `instructions_2.txt`
|
||||
|
||||
The current phrase is "The sneaky toaster stole my socks and ran away to Hawaii.".\n[NOISE intended to confuse the agent]
|
||||
Read the file instructions_3.txt using the read_file command.
|
||||
|
||||
|
||||
...and so on.
|
||||
|
||||
- `instructions_n.txt`
|
||||
|
||||
The current phrase is "My pet rock sings better than Beyoncé on Tuesdays."
|
||||
Write all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command.
|
||||
|
||||
|
||||
## Objective
|
||||
|
||||
The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the phrases throughout the process. The agent successfully completes this challenge if it writes all the phrases into a file.
|
||||
|
||||
This is presumably harder than task ids as the phrases are longer and more likely to be compressed as the agent does more work.
|
||||
5
docs/challenges/memory/introduction.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Memory Challenges
|
||||
|
||||
Memory challenges are designed to test the ability of an AI agent, like Auto-GPT, to remember and use information throughout a series of tasks. These challenges often involve following instructions, processing text files, and keeping track of important data.
|
||||
|
||||
The goal of memory challenges is to improve an agent's performance in tasks that require remembering and using information over time. By addressing these challenges, we can enhance Auto-GPT's capabilities and make it more useful in real-world applications.
|
||||
14
docs/challenges/submit.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Submit a Challenge
|
||||
|
||||
If you have identified a task or problem that Auto-GPT struggles with, you can submit it as a challenge for the community to tackle. Here's how you can submit a new challenge:
|
||||
|
||||
## How to Submit a Challenge
|
||||
|
||||
1. Create a new `.md` file in the `challenges` directory in the Auto-GPT GitHub repository. Make sure to pick the right category.
|
||||
2. Name the file with a descriptive title for the challenge, using hyphens instead of spaces (e.g., `improve-context-understanding.md`).
|
||||
3. In the file, follow the [challenge_template.md](challenge_template.md) to describe the problem, define the scope, and evaluate success.
|
||||
4. Commit the file and create a pull request.
|
||||
|
||||
Once submitted, the community can review and discuss the challenge. If deemed appropriate, it will be added to the [List of Challenges](list.md).
|
||||
|
||||
If you're looking to contribute by working on an existing challenge, check out [Beat a Challenge](beat.md) for guidelines on how to get started.
|
||||
@@ -1,5 +1,5 @@
|
||||
# Auto-GPT
|
||||
|
||||
Welcome to Auto-GPT. Please follow the [Installation](https://significant-gravitas.github.io/Auto-GPT/setup/) guide to get started.
|
||||
Welcome to Auto-GPT. Please follow the [Installation](/setup/) guide to get started.
|
||||
|
||||
It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
|
||||
|
||||
@@ -23,7 +23,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
|
||||
[openai/api limits]: https://platform.openai.com/docs/guides/rate-limits/overview#:~:text=Free%20trial%20users,RPM%0A40%2C000%20TPM
|
||||
|
||||
!!! important
|
||||
It's highly recommended that you keep keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage).
|
||||
It's highly recommended that you keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage).
|
||||
You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits).
|
||||
|
||||

|
||||
@@ -54,9 +54,15 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
|
||||
environment:
|
||||
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
|
||||
REDIS_HOST: ${REDIS_HOST:-redis}
|
||||
volumes:
|
||||
- ./:/app
|
||||
profiles: ["exclude-from-up"]
|
||||
volumes:
|
||||
- ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace
|
||||
- ./data:/app/data
|
||||
## allow auto-gpt to write logs to disk
|
||||
- ./logs:/app/logs
|
||||
## uncomment following lines if you have / want to make use of these files
|
||||
#- ./azure.yaml:/app/azure.yaml
|
||||
#- ./ai_settings.yaml:/app/ai_settings.yaml
|
||||
redis:
|
||||
image: "redis/redis-stack-server:latest"
|
||||
|
||||
@@ -150,7 +156,18 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
|
||||
|
||||
### Run with Docker
|
||||
|
||||
Easiest is to use `docker-compose`. Run the commands below in your Auto-GPT folder.
|
||||
Easiest is to use `docker-compose`.
|
||||
|
||||
Important: Docker Compose version 1.29.0 or later is required to use version 3.9 of the Compose file format.
|
||||
You can check the version of Docker Compose installed on your system by running the following command:
|
||||
|
||||
docker-compose version
|
||||
|
||||
This will display the version of Docker Compose that is currently installed on your system.
|
||||
|
||||
If you need to upgrade Docker Compose to a newer version, you can follow the installation instructions in the Docker documentation: https://docs.docker.com/compose/install/
|
||||
|
||||
Once you have a recent version of docker-compose, run the commands below in your Auto-GPT folder.
|
||||
|
||||
1. Build the image. If you have pulled the image from Docker Hub, skip this step.
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ python -m pytest
|
||||
:::shell
|
||||
pytest --cov=autogpt --without-integration --without-slow-integration
|
||||
|
||||
## Runing the linter
|
||||
## Running the linter
|
||||
|
||||
This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting.
|
||||
We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`.
|
||||
|
||||
@@ -93,3 +93,13 @@ To print out debug logs:
|
||||
``` shell
|
||||
./run.sh --debug
|
||||
```
|
||||
|
||||
## Disabling Command Categories
|
||||
|
||||
If you want to selectively disable some command groups, you can use the `DISABLED_COMMAND_CATEGORIES` config in your `.env`. You can find the list of categories in your `.env.template`
|
||||
|
||||
For example, to disable coding related features, set it to the value below:
|
||||
|
||||
```ini
|
||||
DISABLED_COMMAND_CATEGORIES=autogpt.commands.analyze_code,autogpt.commands.execute_code,autogpt.commands.git_operations,autogpt.commands.improve_code,autogpt.commands.write_tests
|
||||
```
|
||||
|
||||
16
mkdocs.yml
@@ -1,5 +1,5 @@
|
||||
site_name: Auto-GPT
|
||||
site_url: https://significantgravitas.github.io/Auto-GPT/
|
||||
site_url: https://docs.agpt.co/
|
||||
repo_url: https://github.com/Significant-Gravitas/Auto-GPT
|
||||
nav:
|
||||
- Home: index.md
|
||||
@@ -17,6 +17,20 @@ nav:
|
||||
- Running tests: testing.md
|
||||
- Code of Conduct: code-of-conduct.md
|
||||
|
||||
- Challenges:
|
||||
- Introduction: challenges/introduction.md
|
||||
- List of Challenges:
|
||||
- Memory:
|
||||
- Introduction: challenges/memory/introduction.md
|
||||
- Memory Challenge A: challenges/memory/challenge_a.md
|
||||
- Memory Challenge B: challenges/memory/challenge_b.md
|
||||
- Memory Challenge C: challenges/memory/challenge_c.md
|
||||
- Information retrieval:
|
||||
- Introduction: challenges/information_retrieval/introduction.md
|
||||
- Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md
|
||||
- Submit a Challenge: challenges/submit.md
|
||||
- Beat a Challenge: challenges/beat.md
|
||||
|
||||
- License: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/LICENSE
|
||||
|
||||
theme: readthedocs
|
||||
|
||||
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
|
||||
|
||||
[project]
|
||||
name = "agpt"
|
||||
version = "0.2.2"
|
||||
version = "0.3.0"
|
||||
authors = [
|
||||
{ name="Torantulino", email="support@agpt.co" },
|
||||
]
|
||||
|
||||
@@ -10,17 +10,18 @@ requests
|
||||
tiktoken==0.3.3
|
||||
gTTS==2.3.1
|
||||
docker
|
||||
duckduckgo-search
|
||||
duckduckgo-search>=2.9.5
|
||||
google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
|
||||
pinecone-client==2.2.1
|
||||
redis
|
||||
orjson
|
||||
orjson==3.8.10
|
||||
Pillow
|
||||
selenium==4.1.4
|
||||
webdriver-manager
|
||||
jsonschema
|
||||
tweepy
|
||||
click
|
||||
charset-normalizer>=3.1.0
|
||||
spacy>=3.0.0,<4.0.0
|
||||
en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl
|
||||
|
||||
@@ -52,3 +53,4 @@ pytest-integration
|
||||
pytest-mock
|
||||
vcrpy
|
||||
pytest-recording
|
||||
pytest-xdist
|
||||
|
||||
24
run.bat
@@ -1,8 +1,24 @@
|
||||
@echo off
|
||||
python scripts/check_requirements.py requirements.txt
|
||||
setlocal enabledelayedexpansion
|
||||
|
||||
:FindPythonCommand
|
||||
for %%A in (python python3) do (
|
||||
where /Q %%A
|
||||
if !errorlevel! EQU 0 (
|
||||
set "PYTHON_CMD=%%A"
|
||||
goto :Found
|
||||
)
|
||||
)
|
||||
|
||||
echo Python not found. Please install Python.
|
||||
pause
|
||||
exit /B 1
|
||||
|
||||
:Found
|
||||
%PYTHON_CMD% scripts/check_requirements.py requirements.txt
|
||||
if errorlevel 1 (
|
||||
echo Installing missing packages...
|
||||
pip install -r requirements.txt
|
||||
%PYTHON_CMD% -m pip install -r requirements.txt
|
||||
)
|
||||
python -m autogpt %*
|
||||
pause
|
||||
%PYTHON_CMD% -m autogpt %*
|
||||
pause
|
||||
24
run.sh
@@ -1,9 +1,25 @@
|
||||
#!/bin/bash
|
||||
python scripts/check_requirements.py requirements.txt
|
||||
|
||||
function find_python_command() {
|
||||
if command -v python &> /dev/null
|
||||
then
|
||||
echo "python"
|
||||
elif command -v python3 &> /dev/null
|
||||
then
|
||||
echo "python3"
|
||||
else
|
||||
echo "Python not found. Please install Python."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
PYTHON_CMD=$(find_python_command)
|
||||
|
||||
$PYTHON_CMD scripts/check_requirements.py requirements.txt
|
||||
if [ $? -eq 1 ]
|
||||
then
|
||||
echo Installing missing packages...
|
||||
pip install -r requirements.txt
|
||||
$PYTHON_CMD -m pip install -r requirements.txt
|
||||
fi
|
||||
python -m autogpt $@
|
||||
read -p "Press any key to continue..."
|
||||
$PYTHON_CMD -m autogpt $@
|
||||
read -p "Press any key to continue..."
|
||||
@@ -11,15 +11,19 @@ def main():
|
||||
line.strip().split("#")[0].strip() for line in f.readlines()
|
||||
]
|
||||
|
||||
installed_packages = [package.key for package in pkg_resources.working_set]
|
||||
installed_packages = {pkg.key: pkg.version for pkg in pkg_resources.working_set}
|
||||
|
||||
missing_packages = []
|
||||
for package in required_packages:
|
||||
if not package: # Skip empty lines
|
||||
for required_package in required_packages:
|
||||
if not required_package: # Skip empty lines
|
||||
continue
|
||||
package_name = re.split("[<>=@ ]+", package.strip())[0]
|
||||
if package_name.lower() not in installed_packages:
|
||||
missing_packages.append(package_name)
|
||||
pkg = pkg_resources.Requirement.parse(required_package)
|
||||
if (
|
||||
pkg.key not in installed_packages
|
||||
or pkg_resources.parse_version(installed_packages[pkg.key])
|
||||
not in pkg.specifier
|
||||
):
|
||||
missing_packages.append(str(pkg))
|
||||
|
||||
if missing_packages:
|
||||
print("Missing packages:")
|
||||
|
||||
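For readers who want to try the new dependency check in isolation, here is a minimal standalone sketch of the logic in the hunk above, assuming `pkg_resources` (setuptools) is available; the hard-coded requirement strings are illustrative only, the real script reads them from requirements.txt:

```python
import pkg_resources

# Illustrative requirement strings; both appear in this changeset's requirements.txt.
required = ["duckduckgo-search>=2.9.5", "orjson==3.8.10"]

# Map installed distributions to their versions, e.g. {"orjson": "3.8.10", ...}.
installed = {pkg.key: pkg.version for pkg in pkg_resources.working_set}

missing = []
for line in required:
    req = pkg_resources.Requirement.parse(line)
    # A package is "missing" if it is not installed at all, or if the
    # installed version falls outside the requirement's version specifier.
    if (
        req.key not in installed
        or pkg_resources.parse_version(installed[req.key]) not in req.specifier
    ):
        missing.append(str(req))

if missing:
    print("Missing packages:")
    for req_str in missing:
        print(req_str)
```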
@@ -1,29 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
sys.path.append(os.path.abspath("../scripts"))
|
||||
|
||||
from browse import extract_hyperlinks
|
||||
|
||||
|
||||
class TestBrowseLinks(unittest.TestCase):
|
||||
"""Unit tests for the browse module functions that extract hyperlinks."""
|
||||
|
||||
def test_extract_hyperlinks(self):
|
||||
"""Test the extract_hyperlinks function with a simple HTML body."""
|
||||
body = """
|
||||
<body>
|
||||
<a href="https://google.com">Google</a>
|
||||
<a href="foo.html">Foo</a>
|
||||
<div>Some other crap</div>
|
||||
</body>
|
||||
"""
|
||||
soup = BeautifulSoup(body, "html.parser")
|
||||
links = extract_hyperlinks(soup, "http://example.com")
|
||||
self.assertEqual(
|
||||
links,
|
||||
[("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")],
|
||||
)
|
||||
@@ -1,3 +1,4 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
@@ -9,6 +10,8 @@ from autogpt.workspace import Workspace
|
||||
|
||||
pytest_plugins = ["tests.integration.agent_factory"]
|
||||
|
||||
PROXY = os.environ.get("PROXY")
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def workspace_root(tmp_path: Path) -> Path:
|
||||
|
||||
@@ -3,7 +3,7 @@ import pytest
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config import AIConfig, Config
|
||||
from autogpt.memory import NoMemory, get_memory
|
||||
from autogpt.memory import LocalCache, NoMemory, get_memory
|
||||
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
|
||||
from autogpt.workspace import Workspace
|
||||
|
||||
@@ -19,6 +19,16 @@ def agent_test_config(config: Config):
|
||||
config.set_temperature(was_temperature)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def memory_local_cache(agent_test_config: Config):
|
||||
was_memory_backend = agent_test_config.memory_backend
|
||||
|
||||
agent_test_config.set_memory_backend("local_cache")
|
||||
yield get_memory(agent_test_config, init=True)
|
||||
|
||||
agent_test_config.set_memory_backend(was_memory_backend)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def memory_none(agent_test_config: Config):
|
||||
was_memory_backend = agent_test_config.memory_backend
|
||||
@@ -35,6 +45,7 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.commands.web_selenium")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
command_registry.import_commands("autogpt.commands.task_statuses")
|
||||
|
||||
ai_config = AIConfig(
|
||||
ai_name="browse_website-GPT",
|
||||
@@ -70,6 +81,7 @@ def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace)
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
command_registry.import_commands("autogpt.commands.task_statuses")
|
||||
|
||||
ai_config = AIConfig(
|
||||
ai_name="write_to_file-GPT",
|
||||
@@ -101,3 +113,106 @@ def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace)
|
||||
)
|
||||
|
||||
return agent
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def memory_management_agent(
|
||||
agent_test_config, memory_local_cache, workspace: Workspace
|
||||
):
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
command_registry.import_commands("autogpt.commands.task_statuses")
|
||||
|
||||
ai_config = AIConfig(
|
||||
ai_name="Follow-Instructions-GPT",
|
||||
ai_role="an AI designed to read the instructions_1.txt file using the read_file method and follow the instructions in the file.",
|
||||
ai_goals=[
|
||||
"Use the command read_file to read the instructions_1.txt file",
|
||||
"Follow the instructions in the instructions_1.txt file",
|
||||
],
|
||||
)
|
||||
ai_config.command_registry = command_registry
|
||||
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
|
||||
agent = Agent(
|
||||
ai_name="",
|
||||
memory=memory_local_cache,
|
||||
full_message_history=[],
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
next_action_count=0,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace.root,
|
||||
)
|
||||
|
||||
return agent
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def get_company_revenue_agent(
|
||||
agent_test_config, memory_local_cache, workspace: Workspace
|
||||
):
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
|
||||
ai_config = AIConfig(
|
||||
ai_name="Get-CompanyRevenue",
|
||||
ai_role="an autonomous agent that specializes in finding the reported revenue of a company.",
|
||||
ai_goals=[
|
||||
"Write the revenue of Tesla in 2022 to a file. You should write the number without commas and you should not use signs like B for billion and M for million.",
|
||||
],
|
||||
)
|
||||
ai_config.command_registry = command_registry
|
||||
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
Config().set_continuous_mode(False)
|
||||
agent = Agent(
|
||||
ai_name="Get-CompanyRevenue",
|
||||
memory=memory_local_cache,
|
||||
full_message_history=[],
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
next_action_count=0,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace.root,
|
||||
)
|
||||
|
||||
return agent
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def kubernetes_agent(memory_local_cache, workspace: Workspace):
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
|
||||
ai_config = AIConfig(
|
||||
ai_name="Kubernetes",
|
||||
ai_role="an autonomous agent that specializes in creating Kubernetes deployment templates.",
|
||||
ai_goals=[
|
||||
"Write a simple kubernetes deployment file and save it as a kube.yaml.",
|
||||
# You should make a simple nginx web server that uses docker and exposes the port 80.
|
||||
],
|
||||
)
|
||||
ai_config.command_registry = command_registry
|
||||
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
Config().set_continuous_mode(False)
|
||||
agent = Agent(
|
||||
ai_name="Kubernetes-Demo",
|
||||
memory=memory_local_cache,
|
||||
full_message_history=[],
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
next_action_count=0,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace.root,
|
||||
)
|
||||
|
||||
return agent
|
||||
|
||||
@@ -0,0 +1,168 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"input": [[1985]], "model": "text-embedding-ada-002", "encoding_format":
|
||||
"base64"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '83'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/embeddings
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SaSxO6Orvl5++n2LWn9FsiIgl7xl3kkiAgYldXlyAiKHJNgJw6371L/6dOd08c
|
||||
QAqV5HnWWr/kP/71119/t1ld5NPf//z197sap7//x/fa/Tbd/v7nr//5r7/++uuv//h9/n8jiyYr
|
||||
7vfqU/6G/25Wn3ux/P3PX/x/X/m/g/756++DcanIKGZXd/X5SZTuBpdQW6+lYQqkfQ5NFN8pSgIw
|
||||
zBGFBLyF+0x9ZATRzFVcC+xx3aJPfw3cJaBFA2RnibBlW4EuBNKhlLJ5dbH/QRJYDniFELe0Ryw1
|
||||
zy4l2M8lVX4IVLvccrDkD26WikG50PuxCQFdnkwEiSTcsFJvgcv2m6mB50DfUpu5FLAPV1ZQMZ8b
|
||||
IvW7azZ//KsHo/R9nYA/OPXa3M0WXkbEI3Dus2z9lNsEjEngUv+4PdWrN6EKdpdVoMba1vqqHZkH
|
||||
b+fNG4mbTQRmWGsFxIdzQM3Kfkbt1AUcBI3doc1xk9ZLJVoC3ClDSm3yBtl4AC8Bvk47CzunCbFl
|
||||
aqkGQtGR0A74sT4HY8DDq8puaE3xHC1C9H7BD749CHUSFM03CxuwDd2YQD5eskks+B4idSLUZd1V
|
||||
n+fOt2FzPT2pkptdNtu6EQD6Cg2sRFSr1/NdKvcmSu74EM8ioDerUyT9vHfIWElpvUwkz0F2c2e0
|
||||
vXpNNvuXsQe81EdofRp6LfDVTYMNUUx6PtQEjL//y79kHiuu1mRzc3xWsDg6TwQepu6OOhkEae88
|
||||
TKzIbykanVhZ5SOLI3zWZCmjogx6OCXDhAT9yuoulKYevMWdRl2+GOtu9W6a9CHZBxvBramHc8Up
|
||||
MNsImCJ0jBgThDiA13A6U8UU5IxJ8keQGG8I2At5RecPahjD4yJBrGZgAl2aDgjGwQth47BVXR5l
|
||||
eQyPQVnRMISPjMUvNgNJeRm4GC/BsMh0SsDUKRt6PB+LelHlIJCl9SYjYfewsjbe5Rb8MNvDONKd
|
||||
gclhbEAMywgba6u762TdAkjVIkJrd3Pc9antBBgEgUIEXZPZdDzHL/hCTxEtOGkHtg5XBQiuvsUe
|
||||
DR76a+oCCDe720rWtGncxT69JHi49BTbp/jo8vxe5mEjlx02733NVuNIb4BDao0PgNnDnOWLBoNH
|
||||
YlB8ebb6ulyVUS5at8MW8c5DeVb2IdQv8RN7R9F02VEWRxBM2Yka77HVZzqLMbwdpBSJVyNzRymr
|
||||
rY2zKwusPUQpIrNlFYC+AgOHmmCxbfbkS3mMeR47t0UB/ImJHvBP64C4GV/rebiFCKKw66h78mRA
|
||||
twSKwOJ5DtvXng4ru62tlPsgx+63Hpep/Sgwm2cXH55jxNZzfL4BkkeYHPqNMVBkCSFkaL8isbq4
|
||||
gPmml0AnmldsPYNbvYiZBwEcZhcHqV9my/bSWVDZRwOaUSyxWs4WT17k15keClcZmCRTHua8lmGn
|
||||
2QTR9CDbEtabDUaSqLHoMsfUkgoo+WRNL5+oc6ruJqnc3qeeHIwRldNjBXb81aCHeE7ZOnKrAMs6
|
||||
JeRZtn00qXIQwho5Cek+J21Y5/NJg547drRQwStaLreWg+M2irB7ZM96em4kDSiX65bsDjAeRv5o
|
||||
FxDt+JEqUVmzWUkwhD23rthXls0wLRvTAWgnjGhV9/nQ77ZKLpUhxNTYmEs0LU5pyNKTK4gEBjmb
|
||||
9N1DA/zhpmPNutz0JdZ8A3Kq4OOD8HnUtBma8M99fXOpaxLfHF56VUxFwtozfS10vYc5jhl671zD
|
||||
7ayTOErIrC4EGB9xmPbVK4aBW2dorrdAXwN9HGExwJCikPu4zMx2BF6OmU6k0DCGuXWjXLKi/EVj
|
||||
6cPYlD+EGSr704CPt/NnWBRijjBqDzLaXMIuYrvNkgIm5DYRWWXWs7XbOlC0e4/aBlcNzNn0PcyX
|
||||
4YiA8UmHlRVVDBsOKvQU8Za+RmkqgcwZPIy2xgiWNNzOsBncC9Wt/D6Q7a0RwFsKrlhP44ixuK1z
|
||||
mJ9QT93w2LqzFJocrO+tRA9G1NdLW18R7N2qIuLuRoalfTwrYMvjTOZvf1qorhBgHHuB6tK+cef9
|
||||
dXFgBIUnqTt7x2b1/kkgi/oLavuDDKZdJdrSSqWeatz5zOa38EHAy7BPzYDYYAG9ToDs9yrhv/O7
|
||||
3kQ7Br4WHLD71bdOeCgF5OhVRnJYTWDmzWCG7gXK2MxhmlH95s9gPrsaPvrPul6GduWhpZUdPp5a
|
||||
N2PcoxThd/6pJrx1sN5EJYGnk8OhrdsLw7AUhgfdNssRF36WqOtJGUDH9I5Ym54LYx6gDjjipiH8
|
||||
GzRgILEkwqnjJHqA2qGeuYfFwfO6a5Akth82c+RUwlZrZ3o+eHt3GRvZAiS7xPiwOw86I0WqASsq
|
||||
XiiYHiqYL1AeAa34CTufxHKX61vUwIDHiR6bOM3owxV4MCXdhM3g4ej8RRs8OBlsR80VvrMlLdIW
|
||||
3nfBgJXJ9tz3tXReEIKAYteXjtHylLwGevfXA59j+VMvt9vZhtKaythJKk0XnCFuwFefscdLJhtP
|
||||
bPZkJd16JMolO2PGpChw674Q4QXqsDnzm1y69MuduvxWi3jyiRA8bz2Rekfxra/2W4JwSk0DR8G5
|
||||
1ufr6Qkh+qQ+eX/1tputOYUtvzHJOnU2W80EImmTcBHa8Ks0LPYlTOC2O26o/yjs73qiyR9/5CVW
|
||||
XM/LPJbgFFUB2QUiZatq6RUsJPVB5u/7FOIks+HhaliIPWSZjUXjJHB95C69U3DRp+zJV0BYbJ6c
|
||||
0rAG7KffB6kosW/K7bCcmBVAk5Idtk/1h43h6CuSu79cUNTZF0DuYPSAWRkOmWt5dafuPqxwDdID
|
||||
RuVRdZmcuCvciMlEXSW9slV1Zw74/rPCSF6O7uortgAJ6Z9oEndrNKLXksPMs2qqHhRNHxkALyhe
|
||||
vC1GZr4b5lrqCYyO1pZaqcTrHXgVAbwcNiN267qsV2z2OZzsV0ldheJh1nrowBRr7/96fxetRvBU
|
||||
azySmk2Qje1JEeHWbRD1YL9j1FPzFiaf2id9EunZ+pATGy59ZKNdIGLwez5wwnOGjbJ86Gy6lTG0
|
||||
3vHtT723vJnOcCPGE3a//WnZVbMDY+7IYeWtGtkCWMtDn/VP9OaAoDMuSBLAKamAtdi26+UdVCv0
|
||||
nxHGB0/P63arZ8p+P8aAjKfdFgxTIvYSSooQqy0NdRbWqgDezcajWno5RLO5X0W407WeOob7yGbX
|
||||
OzcSnYH01e8SzHSeEyjPUkidgdcH5sqjBEs+7mgx4339HtpVgKfH1FBNSEyXP1qAg+vmiqj9XHYZ
|
||||
M7o0gWP4qAhI1MWdw9HUYErUiXqqvrB5TIpv/QoT1qT7yBbnagRAfUgmthscg4UF6igp/aTR42ab
|
||||
DvOYezYYNchh/bW/6qPJm7lUJCeGkjXjs7EWmAIN1/Cxsk+ygcwukCCIA4aNoHtHdMCnAOyPTwfj
|
||||
G8uHpRh8Afh+XSGiOfXPn/HAj5CGBKa2jKJE4aExFh+qtJH4/X1eCEP9HpE9pYrO25u4grz+vmHn
|
||||
5o4ZScclgGVVMNTZgLqtsWYljPJUo97xibOlPro3aXEDhUbcbR+tGZgVyNXeh6r5rEbjKh17OJ+P
|
||||
GrX7gg0lW4sc7PjMoLb9GdksWoIEHTUryV5528OuhwUH6xzX1ASdWn/rPQbVkLwRj/kFLP6wjlB7
|
||||
4hrJqDfA1m1OhmQJwZlG5XrK/ughamObmvrwZrPjDC0wN35GcQsUIDiLpsHwfD5SDfcO4B2nbqF0
|
||||
0mqql+sSMRSebHghLwsHXVC79OfHp1ueYPu+J1EHl9oCxFsHqj9Tk9EPM3t4etAGH+73rT7LzeqA
|
||||
0UjP3/mANT2MPJKm4XbCasRb7uI3fgjuZ/FEHfG0uuNmjGYQcy6HXpL6yKbaPdrwlzd1IWuHpZxp
|
||||
DrFcINJ882XHrLEBh0tLv/mnGlbFPoYgDcQc65L7za9bQ4GOESJqQU8DW+tc3sCoPj/Y3Yk70FWH
|
||||
aw4DeTeTfWVe69mSqwamD67E5tfv7OxeKCC+kopsK/uZLbOcIVDySUd21/eoz3TQJCg81itWlxxF
|
||||
8x4VIQzoWiNxF16GOS9bAukzS4i8v1tgvVw3CkRvTsAq92n1dc3nVN5fNx15g04dhL0g3eDY2io1
|
||||
Y/kzrKNRxrK6hCV2HU3Jfs8D4oGcsKY9Fn1BtL8BdFYAdeAxc8k3/8GnlgmoV/rRnRtzEX71RK83
|
||||
q/35j1jKK/tNfdS/wHxvFQu8zWYlUXDW9bksHg7sbo6N3SRt2PrJsARffL9DrDcrdyBp38DPfCoQ
|
||||
f38r2Swmai5/x1Nlu22jKaDFC7KovRDh5FTunBp3WwqCUKEqunLR3KVlKC+39YX4aUjBKD+NEX7X
|
||||
P1r22UOfYzbeoDffTtS5uV7GErZW0h+9KQJDX0JBhkDfLQo1N09nmL/5Bhiu5VPjmqjD7tUYLSik
|
||||
EpA6ZsqwO2VVDlKiT+h1KIaoS4ugBd/5or/1O3qBGcD0yV/oY5tifW0rGoKNadyRYFuBy0javwD4
|
||||
NMo3f94zFtZH/ud/aELW/pvXRgluwfP0x68vh2tg/PG/1tpHOvXUuP/lETKpQslI5AkatGqgI5k/
|
||||
3cGiawuE9VPSyfztZ+xZ1go8v8cYeyipavbzb/GjD6minXuXFLrbg7g01m8+7Fxat9oN4g+j1AIX
|
||||
nH3rrwVj66gYJY9b1L8uLw1WtpqSLd6fM4YsLtj7fJNjJMIzWPL3ywMavozUupb20AfSoYILtDn6
|
||||
84vELv0S5kpIkbjrpuwPfwmGjYQku8uBcFtvEiSkfRIQHludJa8PhEuyAei1trU708ER4SSHB3zU
|
||||
W06fV9Ks8BxeD1SP0rSe57UP4VooPFbf1gHM6LUUcvp4GGT51gfbA1mE9f1YYLuRlWi9kgGB4C5O
|
||||
9GAvtvu5yjoPz5pYY4zugst2+tOBumSH1LxHyJ0jyo9wauua7O3XNVtMO2tg7+V3IontAcxD/Jqh
|
||||
ubwIvbc6ijrTqCXotpJIFc5FbHYvcw4vnNFj5z5u6mWWIw8qkt/Qg3cLszXnPR7IhVVS1XluMxI/
|
||||
xBBAKeapW5ApY5301mBNDlck2h8PrMn7JIJFbs7UiJ8kG03B9cDxWdzx8aPVbOVUmUBouguxfbPP
|
||||
mKjWNxhGYEWQj0/ZrJOa/62fnz7U6zPUUjgEbfdnfuf8VDgwy94HrMe+566L/+bhKPgIl7zI6U8h
|
||||
mhrw3oHDL78z9s3nIqGvHDvGEjG25ftY+vYDrGsvvp6PKioBMFaMxML29UXfXRTw43nuLaBgvb6R
|
||||
Bb/8B9FI74f5+RbKXx6jaJtVQ0f3Q7k/12cNB3nc6lRO1QoWAxeixi1BtPiNGUL+teGpbQvD0Pey
|
||||
IoLp824JTO59NpNJ6SFVFEjawWh1VlbtDF/K3qUG94CMvqdRA6p7fP78l7twyyPYJ3ZlYr+ePjW5
|
||||
vi0L1iVnozJc02hOHa2Rv36ILOUprpcj/tjw+rY+2Dm/d4xOJ92Tf/xJfTAB0N96JpxrUttQ02ht
|
||||
xgDJ19gZ8c//rBN7QMDrJkedOVSzMWzPMdyOyoT2xn2pxy9fg21cNtP+vHPcHnGuBiEkFlW2mDHm
|
||||
OfsZdpYAsMKLhT5eT08OjALpCfzsNLaquO1hcZgcwhwJuIvbXA1gb5wDko4XO1vEArbAvXAyteFh
|
||||
cNnpLGvQng2dPkprqic+WlJZGe0r2fNmw358FBRJxMii6Zcf38x/+QXndfvSf/4QvJ6XHmvRvhtY
|
||||
4D0k6W7A5Ntv+2H83oe2m2Y0g9D55rVAg/0oQiKxswB6Im0SiWxMheL9Z2LdMPYV+OWH42mjusTM
|
||||
ux7MZltgMy0k1ke58gLf+aFq7nhgTjiQ/3nf2mVZh+HKGgIDeTtj7CQkI6qSWFAUP5i8u+d9oD1M
|
||||
uP03X9JDFdpRb5XXGG4dZ0fN4NG7bMcXJehasqBebD9gXiW1lW9pvKO6ewrAt7+uwB1Ch2rlNAHW
|
||||
8bsGfip0Ii/8Zjq7bo4N+PJq+s1/YHs+vnK4+YjoD+9dvFeWAF0cAT7l12Egx3qL4ErFnhpxiIf9
|
||||
GV9FuM5Cg60mr4cF7oIS6s3WJFs327G1GVMEI3nOqbbfpdGojXsR7urHEYlfXracuM4B37z8zW9P
|
||||
0FOtWIF9D05fvnZnzO65HIIXDfHXr331UirBTn1H1OCEsP7pgXQqh4rI85Vn6+/9LE5ikr3/rIcv
|
||||
b7Z+/AEf4aq7c+ebKQDGjIkgVLk+7697B2qCpJGBuqCeOHbWwEswEI6mZpuRn14GhG/JJtKd+udX
|
||||
4I9H+8dmZfP97At/+MzmxuAw4c/Og/sxAdTyzoQxJeBz8PE39pdv7/W3U3UpLDUlxt4NfMD4q9d3
|
||||
lxT4y1N0iryBQLe95tRNPu9h0TYOAvBS6/T40XS2rHYfSycGKbVhcmRbAroQLgN5YG+gA+sfFkhg
|
||||
2PYJEoT9AkZwfcdgE79V6pq+r/M6fwqg3qEL9peDEs1uqCtw95JK8tRufs1AUNnAOJxe1Do5mr47
|
||||
inog/fR9zmPb/fILBRKXn7G3T/WMbu61DS89uyMAETeQjU57sG4yRIZZoe46n68KtNj1g621Z+54
|
||||
VK0S2l6c4JsalD+9QeCb12mmcChbrdsg/vIZxpxfRayMCg++L0WLoDFCl11lXQDvs5lQnz/J7Mej
|
||||
gZvUzc+/RuyRqSncZ1eFfv25y3/rWT6cT3eMkiCrd+mIE7BjoMX6+/OqWXWTX/CrF4QT4ZbNh+3m
|
||||
9ocXWf1QZXMfbC3ocXBPvTPl2Ki3XAo43q7oob/OP55syGopqPSX10lTAQPoUS1SWynf7penoD9+
|
||||
6dSdrXoXk1SERg71n37UfSY4DfzuT2C9I162Oz1CBxo599O/dlhF5hB4E5cEF8MCAG2WbIR5sb38
|
||||
/HJEj2pbwN4r7ljV9B2YuQeCkmJtEsRzjxysxZ4UkJ6klSoXdacT64RmcNw8O3pQjIFR7TlDuM2s
|
||||
FStqumEL3KUV7EygIMsbXwPtD3tJEh6BjP0xnPSfvsBbAkpSfCIuY0HoJ0AXLh2av3x6Fpw1he5B
|
||||
1bC2zPMwvgXqwaOPY4pbfxvN8f4kyE8zeFFzIyqR8Ov/H1+2qdL2ZOgVWw3+jDfubyXaYWfiwIdc
|
||||
P9SrpHR4PbeqAdqoGagenHWXLP7E//whPpyTKWNfvyh/88mf/YWV2h4C1s4LsDMgqtP4YSXQo/mZ
|
||||
BhMph13Udx7cbZMNgS/ryabuo4jwiFyMcXRTsuWbp6BUPg/kQ1EdLQFNGrgxrTs2pE8E+tsaSvAy
|
||||
te6Pp4AJLoMB9XiDsdtMhb5uu30KN9GxR2y6JMP84rcB1IVzh7oqx9my9ZAnhfvXif78ycKUjQPS
|
||||
zb7+7S8w8kQ3AkVJawjvbIp6XuUlgbzURmQP19plV9aMsBssHZtA93Xy88O/vK+ViaiP0r0U5K8f
|
||||
psahcKNZcKQbbFwUY6x2oT6/KBbAb/9HxyWuZ+mZVjByKo/65oV3R/zwNPjjZUyITsMo908e3LVD
|
||||
S/g4pDWTkOj94Y+XdHOpZ16wbLgRhSOST/CarSMnCWDevp/426/An/xvc+1Av+uVzXIj2dI+Fy5Y
|
||||
iY7IpZddj8D+qhK0+er7aqR7CAso+kRsmr6epQu7yTW2Ttj/wCaaP5z1gostEqpc0hHQIuo18OXZ
|
||||
ZJGO/cDsXsjB8T4w0hqqGHV0k3I//4rV6D5n9P1CEjjs9SO27mrGGA6iFGZw80Q722U1WY6cLe2U
|
||||
LsV2RdRhXvkcwfAyZjTcbnBGfzx4t26m335a9NvvBV8egY+k58A01GYKv/yY+rMj6N3LA9ZPn7AD
|
||||
d80wbMWbAr/9AvuqoLBp3r1n+M1zROhj7+tPrQpu1BNP3nZaRUtyaEZ4xK+G/Pj1ck2mAv78uTek
|
||||
gt5utrMo/fTLFHiSrVn/GeUfn4y/ej0XVdjCe5xLGD0kTWeKb8ywAsz4+jPObVM8a/CsSTVG/srp
|
||||
0zBWJfyUq0Qkv/m4q+ynrz/1AGrPYaszxC8Q5I8EzdrZ0dmLditYrhcfW5X/YqOPDhW8XVf05XOb
|
||||
aC3sxYITjD2qLM1xYCu1R2guDUFSv9tnP14n9/dbivFxkw5ffbaBwCqLKi/j5tIHbsr9ZVOpWHck
|
||||
oLPrPnZk8zsT6BNx0dgdzyOA1tum+uodwWrdakn8+3cq4D//9ddf/+t3wqBp78X7ezBgKpbp3/99
|
||||
VODft/vt3zwv/JsKf04ikPFWFn//81+HEP7uhrbppv89ta/iM/79z1/bP6cN/p7a6fb+fy7/6/td
|
||||
//mv/wMAAP//AwDOXgQl4SAAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c09bf823fb50b70-AMS
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 01 May 2023 17:29:41 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-organization:
|
||||
- user-kd1j0bcill5flig1m29wdaof
|
||||
openai-processing-ms:
|
||||
- '69'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '2999'
|
||||
x-ratelimit-reset-requests:
|
||||
- 20ms
|
||||
x-request-id:
|
||||
- 555d4ffdb6ceac9f62f60bb64d87170d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because it is too large
@@ -497,4 +497,501 @@ interactions:
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"input": [[18031, 1495, 220, 16]], "model": "text-embedding-ada-002",
|
||||
"encoding_format": "base64"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '99'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/embeddings
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SaS6+zOrel+9+v2Nrd1KcQQvDM7nELECB2EnItlUpACOEWrjbgo/PfS+Q9OlXV
|
||||
WQ0WS3jZnmM+Y9j/8a+//vq7CrM46v/+56+/i7Tr//4f87NX0Ad///PX//zXX3/99dd//H7+f2/G
|
||||
ZRi/Xuk3+b3++2X6fcXj3//8Jfz3k//70j9//b0V0oz2TVyG/Rt/ZNCN/sr2Me7CatyjC9C+y5mX
|
||||
y43OQnuxAIvnCnstoOdMy07C9hEVhJkt2odNlyJNVtPEIEoy1f743G1jZLXFlxxOSA8nvUg1eOgL
|
||||
D6/UpEFVYO8qqKnns11X2uEU90xB2Ft4zPyMSGcv90Ihft0eeDxtwR3erNbgvRZXmEq31O0M8w5I
|
||||
JkRhxjHIEe8+Y7d9RWbIdlbj+NPo3A2Quk4le1l0UBc5bQCvLtjjplK3qPWm5xE1kkGYHTeWz+3O
|
||||
yWFydYrlGuxwuiw+JoTHRcZ2TVRl6Td8OhAk2oUYtvvRJ+kSd4jcHcRUPbfCro6bBnxjonSpJE04
|
||||
tCqWUXROMbOc3spYFY0ShI+FQry1v8jGYv2IUOccQ+YIx2M4Tu7FRAGx9ng7VPeQuyf7CJcSZKJc
|
||||
4jCkyzhQQLqEiLkv7x32RskqSDaPhOzS85ixM9I0VHmKRZ6SmIaDH0kNdHvoyQGg8CfxNFzg2wYt
|
||||
sfB0Caf2k9sw+ReZWXe15MMldkVkyreCkbF1EdWN6w1sj+gMq2qC+nCfmtsXdEDw/rjiUzLYkYxF
|
||||
3GGBWY9sqCK7hGp1/hLvEqUtX/tPE/L11SRe3GLe093uAfhauhSx4hSO6Gk04G/ZG6/XdNIn7g3J
|
||||
dp5fpmwDi0+X+JujJOFftrdKMRyXb/8GYV2UdHVdqLoYWE8JTvtaJkZQn3Q+uSiF9zfV5vlC4bgR
|
||||
8w7sbpWzgzC926HcVBWoaikTbJk5olZTi0BisSE7NWn4KBC7g2TcXdkeENG5nlcUkmo0iKkbj5Cr
|
||||
KTkix7oBXlzflc/Kpw1QqkTDqA6VbHSPiQif9H5m1nlN+HjX3x36jU87RZnLyfr1AMaOhLz7j8BH
|
||||
fE0aeFzyHi8sU3RHvbjEEPeLDVGOzyWaKmhsKJ/ZmR0ih3C+eD06cEfxQtSpydGkfrYBaK6tsVhV
|
||||
Fc73/XWA9Vk0yb56We3gHO8lJMmQ4rpLM7c5+JcBmnd0ZF61vPHaYZEBBCdA9M/lGw5xr5+XU7uQ
|
||||
fvvRr4aqOUN2WXvEWJ0sfdUohxy2vqQRUkeV2x8fWgfJ8bVhujfmYf2muYbe30RjhH22s36sG/hm
|
||||
jz3Trh/G+3qbxZs+j3Z46M0NYqwcE5CxfGAEraKMZivlCNwo15jN9UPH5lGCflV12i43Q8bHhk5w
|
||||
501M5Wx68knTzQgu8QWRpziysJNW7gLtd+OJ4ckq+dS8NQPOYebP+mCgIe7dMyruyy87VDHT6WlT
|
||||
5CCevpgQLTmj+pHnDxQeISNeuVrr1Uk+drJbe1fiFjfR791RtGXnXjXE2Nx8LsjrD90GZLfHWyIe
|
||||
/eEbjBTmeifa9UMQx6KGwZFOLp3IOfPH3XfyUH9x7kTvs1bnlrMpIV7M9dIbk8/ITTFBoFPC9C0U
|
||||
fOLdVIGiqISYhViEw8M4BttPc2PEXTPb56wcU8jXF5Xdj2sr5CtW5tAL45aZ1l5r1ytS2UALZtFl
|
||||
/8naSjwdJWgsd4dHWViEdIsmE+J+uaCLd73Rqf86mlAZWkOceyxnY5d+HOi044Io62OlTwK5yRC/
|
||||
WhcvV8PXn9zjRKG+Sjr76f3gHN85fM7am3nOQWq7LdTpn/Fg103C6aefwyMT2KwHqLe7SZPDsTDw
|
||||
8m0nLsUroQL340lErZLCp2hz1GCXxA1RDCdyuy30IlzW54p5zJBR4w5SCoF0ydm837OBlkayrV+b
|
||||
hpHi7mUUbaIbuu33Jt0cVqeW7ftnBMiOQ2YefN4OZ9RK6Js9Me3d7hnyNzk0m76Lv8wtbjt3/TSP
|
||||
JTRSqGGx2yltx6znGY7jpWDEKAo0nuRSgq829Xg1tBnqfnpLzDRgankrWp6exRLC2pcwum3PfBwb
|
||||
V0OhUq3w4nA02sZhnwbm/kGjRv74g75TYjDle8P2Ec4Qc0/2GV764OLt7rsK2c4ORfkl0IA5Ltm1
|
||||
6zfZ3+Sz4BywKF88NIz7dYXMVLvO+1nNRuniCSDa4JDL7W3zyX89jM2oRhFxN8IWdc+ifqCcKStC
|
||||
vveM05RvKTiWisk+MK5hf1m8BfTbj/YleerT4dJW6L0WVsR9e2LGX+05AV58TEKOm8Kd+tzEaOvm
|
||||
Ijv7kZt1j/wmwnMxhhj4/uYPYX2+wOPynZhGjTMa8bWq5HMmvglZD4z/9Af+jGeu//YodWfZIfpA
|
||||
L7nk+d3yrUaQm6bBcMYHd0DSkKO2vp/IzA++cNyoAizvekqHZJKzCklDiSpPs4gpP59ZubnWRxj3
|
||||
0YKQNhqyuf85IK2UMwn03ZL3xfoRgxC/1mRXRYk+bmFNgS6UM9H8CHi/EasHYNHrCOFt5tLL4mOA
|
||||
Oq2ALs0m1PubZmMQn3n72+/+9CwVEZhZnXCyPj79wfbKCk7N8USiZFD5cPsMMZymMsVTu1zy8hzt
|
||||
vd96M5VwVx+W730MpxOo7DJ6bdsno2lCFxSYShEDPsrrtQxUvFfEcw6Ptm4SqwPfi154MXl+xhev
|
||||
l4LOgn1gh4P4RaOqCTnUi/UGb+7L0c3iwxTD3elGds3vVz6qHyag52vlMWVzfWXd3M9l9j1OTK2S
|
||||
WzucgkACTQi/RMsGh4+77yJFSrKM5/FEaHzkiQHZ1ZaJwb0UTW3yOkNRHY/kWt5798/6irX0pOW8
|
||||
HnwL+gLk++dEzEop9NZyVjK6Vt4aC+cgzrjxdQY08w6F3vi4X94r3lZVv0ss+o/SZ+7JKOGLuicj
|
||||
t8VeH2lhJdvNoTqS8ChRTu1WogjQWafFzJeMmYiis+Ce2T7a2z4fa7+EUn59Zj5YttU3GDvohUEi
|
||||
6q6O+OBNyIH4WtVMlYQV6t1jJaJw/BrM3txDf4jpLQIviM8EyzfnD89Bdy47Zh/DLuzxNYcf79KN
|
||||
+lHdNZJ8DdzjxsEbqyz0aqw3NnTnYmRqGU7+9NPjmU/oamh1Poz1Kob5/8EC0176cLxLAiiPyibK
|
||||
Lf74VKBqgtbntUsTphr+lPqohG6/6Gc9lrN+fR4rlF1Ei3jpELZDIZ0mVJDqRFyz9tvxWfDjH57U
|
||||
sViFw+Q6GOVx+GSO/3y7QxUpJfTh2mXKJXbcUTw1E9BSW9HWqKNwbD+3Eha7vfGnf3KjQDKKD1pD
|
||||
Dm5X6ZPl5AoE395mdsLalsl3R4PbZHK8kldC21TQODDvL6K/yMvlb6bHgMXDSBRmnvQJ5AxDpOGE
|
||||
LuZ6Gh3yBFR5ukflZ1W7dGqzCcxGW1MxG1W3D+x3Kn/Oas4IFhI+xDSO5ePmarH9SY7Dmbcq8M7x
|
||||
Co9WLei8ywwZiTK26Cc9hf5UrNMLGuUOYfGe4mwE+VXJd40SPIzuKqSltMFo3A8dOQj9xGlYiwYE
|
||||
lSLi6pJ8EU8nyYD6UK7/1O8Y2O8EhKWB2eGxy3SeHKMUXvrk0mnx6rMuPuxiMJ9sT1FfBPqYXaQS
|
||||
eJ9mzEj5Jhy1/FCioFIRXT71g88H2ynRrB9Me5ZtOIpjVcKzfLnEuLyTkF+VvQwPd0GZM7gXNEFQ
|
||||
G+jHB972kfjjRkwCaMc2xZvhS9BglNdh80kvB2Lu9tBOOxsf0UHYCmRPVhWf9fqGdknUkPtye0XV
|
||||
5DoeSgHbZBfYe59Obl0hcfIapoyu4Qq/+tbAOGH0tD4he2RSBcjmmBxi5xGOhll7IOU0x8LMk9P1
|
||||
Ldto5gOMLtsmzNW0SuGQXzBRbhl2h199WFzmbH9d2m0/tbEH7LY1iHvXaFjZXWUj89N9CenLYzjQ
|
||||
UvHgKfIFleb1mFbHOoajYlTMQlIQ/qnPKEgHcuiyJuPZ9aEAuQk2s9Fd9Yefn3SP8g73KeG8yO+3
|
||||
BXrp8YsmyRS0Ne8KE4S1OWL+dqx2eO7aEtTTpsddOqCM3dTHBME9uOJ1caWIm3WVgua/H1S8v1Q0
|
||||
KIpSbU/PbUW8ZrkMO/F09SDrXw51LAeFlZrmAoQbmTDHchp3UpTE2L5vUkm822eN6BvzAZrBmP7w
|
||||
ItfzpINZf//0m8ZqPgIsv/cjXaIVtFN6rgK4PSOBXa+Lsz/peWgjL4jOTHUpafk18RT5lQcd21Wf
|
||||
WB+1fH8GfVXnRPsMms/7PBxQOEoTca/ajnf7nt/QZldLbHddfFze6dqwvWvsQjdD1/r81RUU3VGt
|
||||
0FV6VsIJr4Yb+N7k0tXUdj43606El9C/mSb6YtuN+2UF5jO44SLQD3y0WhajKq9MhvduMPPiyYG6
|
||||
THJyiBzG6+uy7NAYGMEffuoPl+cE7ecqMC8ZPohKwhGDeyLbP38/Ro4EqKaZSexj+MrG0dl5f75v
|
||||
7vatPy6XJ3v76wdzf+TDZ7IeMOsLXm+kE/rDs7NfYZqAPz4Xj+gC+a1vmAPBN+TfkN3kMbDedP05
|
||||
cp9aDbXRbpgeVDDcyWeRoyYgKstp7meHOV/Af/iZON/1Kqx/439svZZYfa6GK7xiJlJspaMcjx7i
|
||||
SHpfNrNfZGFxK9y5fjAMivcmeO/KPq1fyNsYa8+Z9+8nbMn6GkCoNCs8uYeNTu9aYELqLI+4EY/f
|
||||
rLtnbgPukwRUXDxtf5j3G7q3s19d8wVn7ugEMOc1M/8WLpcE+vjDY0ZEc3euPwy9mz7oUK4P/zXe
|
||||
3/d30WHBqx8f3FQnJ2pgfRCN+6aDxSLM6OIUxPp4XTYBmM9nyHZEOLfD4iXKKOvjHR53NfBGoPsU
|
||||
Fovd6sf3nIvDxv75U1rN9bmaeQfNvMBs75TP/NU/JA89PniNJNlnxX0yQA2wREXdkMLpwIMc9jQk
|
||||
eOYbNCZskuGe3Vd49KUyo/ZBHeBJ44o5xdrjdRMPGGXFq2d6G5d8LNaXSQ4/5xM7zHnX6JD4KM98
|
||||
hLnTJe6U8vHxJy/AL9q51awfcErPAlNjD7fTyztREEwnpWMTftyJmaWNDt1tmPOWhzv2GdyQ/E4o
|
||||
wcKpzKZnaYvwFKcN2zevozuo2leBmV+I9XLLlleoEv7kY1g8MX1wmONBl8oBs/GtcodOew2QHGOB
|
||||
2KlA2ukzpcm2PhRAisDeh0Ng7y8oFn1n5uXnzIfHDk4yWmO+mjCnuehHqCD1bTacNOyS42RAq1BM
|
||||
1+c1QcXcDyH4MptWhtW7/OUeg+0lrunsV3do9ZufFMiBNpYThnR07uavXggxmilj6NmZaHGNJ3y9
|
||||
L+pfXpSDheQj209fyx2execBfv/oyN6XjXA918talddPvNDV2p/iQ5Jv1619I7jdWvpIy1sAxX2R
|
||||
0q3QT6jnXrqAA9QZ0d/OA7HlgmpAWGTSz32buj0rAxOW62pB7NEu/dnPSr/+QnaBfkB0dM7DdlTj
|
||||
CKfwuoXjN7QkuFZJwch5s+CTvz1jiPvtghj4mrR0zqsgaoSY3Xb7NhxzUZ39/KljP75i2WXvyL3A
|
||||
t3QVH0S979Iihra+eczKrmu/vyzuAhJr+YmHqO/5j59AZqsPcT2uubQCGsMquXnMQDcIp0vMcnnO
|
||||
K/FiZ8aoDquThnYP50mXL7tCI90dAjkzspg+H4bHh+Y9pkDe7gtfluGaD298VH55xpy3PttJupwf
|
||||
21lviKIoPp9+fLNu33s6BpbKp8XLimDbHe50Nc8PNyxjAe3nwmjdpYJPn+XBQeT7omznb7A+sK8f
|
||||
gAahwQ7idOMs+H5ypGWjiUHTj+H0OV1FcI/SbvZTERqWS7eEfRn0zNY0e85nrByWyypkEXpUnEWN
|
||||
XsmBZXyJs9uZOpeE8iGb8qUg3jmKfPqZSkGWmRhh2X/2vHOO9xzyr+IzVdqMPj1cvgsUvB9ryrYy
|
||||
a/lBWFdo+b25TDucPXfcfWUMeHd+MVd5p5xBIC8gK949lWxvwzndVSlIhmUzd7nwXFHAege3D9/R
|
||||
IZ2SlgdWOyBsLBjBrquEs5+LZPuSErbbM9yi+CDHKFH2Jv69T1mZAXRBuWdeuSr46LCLieSvsCQ/
|
||||
P8FXrDmjYLhbs94c0HAKzjLsd/xEn7uy573lfrTtrH+YsU+nj636sGHOswjJzlU4sG/4AMXWOrqJ
|
||||
8ErvvRNoaL+gWzzMPL1qEkJlvPD27OCjrdu/OnxE7icp8ZQNoBfyfRNBH65cmnaG4w9BRSa0zb0V
|
||||
89RFlvH0vMhBSeBJ7hvpxPldyxYQof1p9oc7f8BX30DO+/Zku7m+Z32hkF33y7n/btvx54cGm6gU
|
||||
ae/WHyVBq375Kk6z1bqle3oekHFYMOK8+97tfvtj5klix42UdeLpIcNLj15MvyrPcPit90Hv10z7
|
||||
DH7Gm+VzQrM/ZVq2Wmf0c3qmKJCuNZYuydPldJekW1Q/2bx/Wref/aJsrA+E7R72pPOXxyREd2ZJ
|
||||
XC0veccP3RE2iyKnm/Khh/QUiBU6w16g5ewP1s8yoLB7i/bcL2/h7BdttKcP5+evMnZVVAk2u4bj
|
||||
2a+jYs5PkZR3OXOP27AdA/ueoP3NXJKfHrCxaSJQHo1N61351me9vqHCajeMrAeCRrTBw+97WHjk
|
||||
vU4FEkuwZLces1BfIT7nfSjZPAs8Ds0hEyKnfUjLu5rS4yUO/SmqxwFo0WnMfhRZuDrJaQn9ZR8R
|
||||
S8k2/q+foUayLgS/qdCyrxwfoXm/r/hT3kRO40Mnozk/YcrqOCAuTt4NBOwaxHa80e3v2StBW1/W
|
||||
mJ1vtLbEVzkBRdGc+XuXjLFyK6D4dXmwOU92p1wMI+h9sZ75oc9GJYkqNIHf0lm4M2q3W01ORutK
|
||||
TGWpuzyspAhdrfKGl+fNoRU7bZDBChYq1dfTOqQrci7/8JQDgeWP+T1ewJ1XMRbDb+MO5FaD7EhH
|
||||
k/jhznPHgxDKsAWRzzyr6iwV9DO8cRFQHu48nZ82ywjUFN8Imf3X1CbXIyx6rjE3O03udLhkDehX
|
||||
XWd7tx35aJgfDMtlExIjl7pwMkxkyrLk9HgjdmnGdtXpj/8kWFf3IQ/tBfx4m7l3DYczL6bbXl9R
|
||||
5o7NKus1Q49APUkVC/xnj8ab5qVQXW4PdpDWe9TL66UEwvrxnvOjN++/IZHQzkq3zGyUmDN8Gb3t
|
||||
XuwTvAJZQpz3XgpWsDUJplrr//J2ac6TiNuGtVvssW5u5/n+k1+uDcsDuJhKS7SwTNtJ0x0bzf1+
|
||||
9uejW7w8w4Elph7TvjLXO4lLE+q08wLnpiuj+o1rGTnvxmJ/8orP6SqAQ+7xnGcabntTrwaUz8+Z
|
||||
uH0hu2yo7AeAc11QqVqwkHWZIYFskYzhZ57xoV5Ukazl1oYuzQL70z1jwS8PYfvykfl96pfz+SJ4
|
||||
tHx+P20X9/pxu6tii27F3tGn5bvHv/MY5hm27k/LZauhP/5MvsT6EJTSGbYg8Nl/IZ/vqmcl42vu
|
||||
kr3bnlD1y39n/WNXqwnbxj5gEwqrEmi3fR3csYkHD3pfqJkqMsVfR43ewHx+hYO9o7b0+Die4eBS
|
||||
gsWD77eTeJIuYD4fNzznle3wLOoAAumW//RZp/eMPcBCZfrLN0Nq7h8C6vUEEasInLDXd8Hll8cQ
|
||||
/YhefAj3lxTIN6qJQ80hG99ka8BtCkuG/WDl99toVYJb4ytxTs8i7AViU7QjSfBbT737nccqm3M9
|
||||
84SNBjVNEuC9tyKG6cpzHhBXoBzbAW9i/Mq6y0JtYKsXT7ZfIDPkdaxM2+sgiOyyOn31cT6fRPN5
|
||||
Mh0qSMLx59/9PhyJeU3X4XzeKIDfP0dmcE/j1GH9AubzT0IcJrY9+/bnTeqASwJqHttJfugXae6P
|
||||
9HPaRrNef+Ufv5M5L+e0fnEPdI9O9N1GQ9tnF5DhvVzf8ST6t5anEzzQ8q0syVUcNm5/Cqpo+zt/
|
||||
FF8dDce5P6JkExZ0FNmY8V11BUiSKcVii+qQ72xfhHuo50RV31lYP3ejg3Z3sWWkjjYh7z6iBuFj
|
||||
u2MHeSqy6eXdNPjtH0y1KJu+6J5DpB0KYqdM0KebqmNAddDgiRqfbGqTxwJ20iDTJW91V/ydhymP
|
||||
S8C8qDXD1U+vLvEN4c1Jjv3+l/+oTVoy7/Yx25F9izOKuM3oKG82fFj76zPg3Sll+/k8eCrWZYP0
|
||||
VfVmXiOr4aQolQl//24F/Oe//vrrf/1uGJTVKy7miwF9PPb//u+rAv8OXsG/BUH8NxP/3ESgXZDE
|
||||
f//zX5cQ/q7bqqz7/91Xefzt/v7nL+nPbYO/+6oPiv/n8b/mb/3nv/4PAAAA//8DAPzVCbHhIAAA
|
||||
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7c0b986749b8f9cc-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 01 May 2023 22:52:31 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '18'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 0d93269b5615ee055622aca99d9e4aa9
status:
code: 200
message: OK
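The request recorded above sends pre-tokenized input (a single list of token IDs, `[18031, 1495, 220, 16]`) to the embeddings endpoint and asks for `encoding_format: base64`. A hedged illustration of the client call that produces such a request is shown below; it assumes the pre-1.0 `openai` Python package and is not code taken from this repository:

```python
# Illustrative only: reproduces the shape of the recorded request above.
# Assumes openai-python < 1.0; the token IDs are copied from the cassette body.
import openai

resp = openai.Embedding.create(
    model="text-embedding-ada-002",
    input=[[18031, 1495, 220, 16]],  # pre-tokenized input, exactly as recorded
    encoding_format="base64",        # embeddings returned as base64-packed floats
)
embedding_b64 = resp["data"][0]["embedding"]  # a base64 string in this mode
```

The JSON response containing that base64 string is what the cassette stores gzip-compressed inside the `!!binary` block above.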
- request:
body: '{"input": [[18031, 1495, 220, 17]], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '99'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaWxO6vJbm7/eneOu9ZXYJCCTsO05yNlHxgFNTU4KIICgCCSRd/d2n8L+rp/vG
|
||||
KgElhJW1nue38h//+Ouvvz9ZXeTj3//66++mGsa//9dy7H4bb3//66///Y+//vrrr//4ff6PK4s2
|
||||
K+736l3+Lv+drN73Yv77X3+J/3Xk/1/0r7/+Nhu9on7/9KJZFRRREw+dQKPUjAE9rMyjbnrqTPHl
|
||||
c7CJYWMEP8GzpfZ+7hJi248X9HfvB+Gis+tn15R28BuMChKNDbN57gERpCyKsfPy5Z5sQNvCJPnM
|
||||
OCQjqom2IzH4qlwgq9C3MqafrCOUQiZR92jTbMgDoQIseDyRnutzP21j5QbTwAWEMfSN5vdOyAEN
|
||||
DxsamYUSDeo0OzpQCpsGkdMmgwYphPIKvvGmoW7PTvNVgI96Eqj/KDc1l4ptDLL31aGGPab2jIpm
|
||||
gBEBb0SaY9DzUNghePeeO+pDXiWjwOMKykhqcAD9iz1bp0qBSh1uaOhpVUQlYXfQ2Ls+oDqzKsDS
|
||||
+XuGVe56FMmt03+79e4INbBvMF5Hc8KfRrKDosJKitw97meJJSmIGnBHsFdLMF8vowI+YVuhefBE
|
||||
QMbV14BfTdTo3qiPdrcu7ilUhhBjS8BRPeaKeYQ+egc4Eo4cLO9ThoJCMmxcH0rNiNenYGhWLXZf
|
||||
LzmaLDzkUIsPGeHKS+LTvYgVbSOeZuo8U9TzIrcQvL2SkW4qQnuyzStZX73HBsdvf6inPJArUMM4
|
||||
wZZzczl73aYDNL4axDZJ9vVwnbUKxpK7I00yXGz2Xpk7OFWnkgYvV4xouvq62r15mwSWyteeTpsy
|
||||
1kceOnQLKEwGks0t6FbsSK1pq4P5As4hjBRI0PRhhi0aPB9gxvMSI13ec37s7TMM3rTEaGeRfqI2
|
||||
FQGukgv6PMqmn0/4GMOSrGvsRKdVRM+2d4Zb5N6wTV5GLWv+S4HeqVSplfWnZDrc7RtUBh9jL/y0
|
||||
YDa7jwLB5jhQy+l0m+SVn0NkIw9vNkBJxtaXQgj6S0+d3frN6bdLJ/BQxBe2XjHMiJI+fPhevSx8
|
||||
4KOSzLRTz9oqJBPRYTkBNq20GwT17Y4N8y71syx8cribrohugPmsJ/CCKTRc5YCdXAmy+T2blT5C
|
||||
H9HLt48SAo4qhJDcLbwJs3vPM9ASaEhqjjQvGAHFWhPDTX1LyWrIupq4PTxCr3ja2LWMB5j13kar
|
||||
qFHvNDZPIR89FgrwPbcpxunVrdePKX1BYwUrjOyn17OT5JXwKVkx/TOfsKUHcMAvY3meHe9Srz/A
|
||||
IKcGEUUtTuayxBB8uveAgHQMsjn+flMYhENPLUBUTsWzrYBgZm80NeUt4qN5ZHAsnDtRLkRLJns9
|
||||
yTqyooiQrXmpWXK3X9AXOowvZXevyTwZoZavWpNuGvxM2KPYuqq1cTG1/ZIC+sorCPdx0VG3dHZ8
|
||||
ErZXpKWW52Krvpc1+/T2AZi+MCIVXtuom078DNQxIki661lGFKWKwa3fVzgQlYCvm3giusQlFynd
|
||||
/lVPjxApamE1Mg7m9QH8O1+13kTUlyvaQ/Y1W9jGtwZ7WC2TOXCvN9hf1hT/8tU4qnULe1HwqPtw
|
||||
Wnt27+ILXov4jU3rZWTSoY9C6JSJifE6L7PXZf9wgbFVPHr/CIndR+imgMvBNGg0Jh5Yg2KE4HXd
|
||||
+uRSdnpNEnHtA7A5D6iLnDabbnexgp+uGdBb2HY2HzdlBctJUjGiGz+RQoJbEM1Qxc5XKZK5jqcK
|
||||
3tBhRGs5ftcsHYsXYPxF6Ob9qDkPWOTD7FlEdLMZqmgctt0Xkmtg00itH/2k5dyFSfD+UjNtLjaN
|
||||
taMMnlthR7IlHpZ4HeAVHtJfPgFMfiUyLDhRsJ+iFR/k3ekLdent0DCLdoDpvn6AEZKe1Pv2fTI7
|
||||
3bz7U6+ConXs+bLqNNhotx3erg0DsMMcvOBmnVnUUTUZzNpXE+HQ6C1qHVb2/LtJfWik2pu6mmdE
|
||||
cl75BTjqoo0D8Nn3zMT5AdrXcEWkefpEsyVdJkAGdETqWVQAuar9B9AMF0SVL0r06cCWwMvO6hEP
|
||||
d6M9XLezBiVdi0nnJoeIfLzwC+CnCujmIbxrdumeBhw8IFETNU/AHbUn8OpETyQIngGYw8cdGNYr
|
||||
QNaZ9gTz2jq7YHlfaBg+sKfzzEQ4ct/BtxmVCd8JCoRrsprxNoJN9tkN+gccn+xD5LjiEbOy7gjk
|
||||
lfBGz0np7PE7XzXYbk8Cti26+81XC0qDc8LyPOBTNH0+0HY9gpSdwGqe1KEBb7djgANftAHreNyC
|
||||
axSc6c1K1Z4MyceFxanzMCLiNZnbra+ARzoVtBA3PJrfpxApQTn2OLhs3pyvWdYCvv88kH5HRt9b
|
||||
QWFoh75WkVw6E6e6pn/AirIzta+HTT/L59wF7+wuYVdxOZ/bzo/BMn8Emk7GSe8lFjQTfYvR9tDW
|
||||
JD52JWR5yZAcV4k9+8LO0H7nL2IeJqNmkUI93NoS4/aa1bw+Pr8wdOUttsuK8e6y6hTYsNMDiept
|
||||
3893PzZ+6w1Nx1WSjMQcBCjsvyVZxZ5sT8c9ImD6pnsc7m/MHp7WCUKolQU+jMThfN4yBE3V4kSb
|
||||
kZGx9XDy4SocJopWLbbp6zNZulYcJoov6jOiiWcRuAcPTua397VZ9MrYL5/iramt+4+bBwpc6jPd
|
||||
wnIH+if0crDOLZs6wbnvJzgHORyIccHRaf/i1D7VDtTs3Z5mD6uuyToud1BoiUumerdO6Cnbu3Af
|
||||
FkciCn0Usaq45yBVIxM7WtTwcVKBAN7eIyKqL9p81D3r9e/85r+1fiRPxYL3W/vE0eXpZSwdzy3U
|
||||
2D4jK2f94eN7JxfwfatyjEPfSsaDF1baC8UfpIBZ4OQX77OfWxQdqwRMg9fHsKuJisSDnCVzil8t
|
||||
+LbiDge71zmZNkf/pSsb54hPHJ8TNtXcAegU59h5XTc9r/xjCx7uFFDPvIwJIf3Lgpp92FNvd90A
|
||||
dgw2L/i4TRwv820z2760cM+rhkhX0kStoFoCZGaVkPm+yZM/40Elgti9H30+i8XWgBO8tRS7cp4N
|
||||
L4scoXPGGjqy0MnWQJgN8BhlB3uRP0Sd5moxFJoupJbdsYRzU6qgO1eY+t3c8WE3mzf4CeoWibnS
|
||||
JbPbSAo8YTH93Q/061tpQb/pN0R/bQvOxXOkwfuhnmjU2xt72ByNFzzyScf2fg4yHn1vMUiMpkT6
|
||||
umyjacOlHCz5gl7mwcomeZGDz+9qQ43U+GZ0uOcuLIlUU+OCT4Ah/yVDIsUPpNanRz/9xvfTw4gf
|
||||
i77Q/EED6I1mHI3Jm0+fdepD4doQ6pdGFPF6d9fgCDYH9HbSVzacgxODdoJcbCz1fp5ULvzRL54+
|
||||
1pwk4ioE76aCZNBvh4Rf1p8JSnztkjWHL8AHyfvCm7G3cSyHm2jYp+sP0OOKU5s7DZ/V24TgsDYq
|
||||
aoFH3c8gfE/aBh1ntFYgrbmxvaYa5GVH7at5rplvhEyrOQoIH8Q6+QZTmoO3X3nk5bkfQA5bXwB3
|
||||
7fPG+NYF9rI+HGAPaIee4ZGBKZmvE1wV9EZxqkbgK6Rr5RdfBMCTkcz8HBF4wK1BzcbV+VilDwtI
|
||||
NjfIR0+8erqfoxYu9YbauTX2LBHXIfSK2ibd61Lbk/OgLlj0JoEbaRfxNHsWcFOnKcaF8simcDhq
|
||||
sJMo+tXDbPr2SQ6xnshEpB2rOX0qMVjWN9J7JPX8azqpWpe+QnQj1DJ2/5oTOClaR4Tm2NXTqfuI
|
||||
kM5HE9+j/px0llqX8PvmHVovemKMivsRqGFsICDZWrbo+wPs8ICwbe6s/k89q0azxDjxYzArqPah
|
||||
ebICJHdOlix6/agdVl8XzUNqg3noPsLPzyCQP1nE6Hp24ODDmaIjtzh9Gf0H5ptcwlFsRBlLxJWv
|
||||
RYa0xoZe3aOpq2++WjzIdtGjj4hP5hhCq98gwj2aZbPC+gk24TnE9p19OZOO+xjSZLejbvDMow8f
|
||||
nBf4hpOII5fue/Z734X9YeitQNyPCXcKGJzGCIGTcQLzlTcIRoN6oObe0Tj3G8GFB1Q7aLeMZ8au
|
||||
FkIfNQH6rpCUcXv7nPTseOLY9a0taKY9fYGKuDkC8FRmY+w+J7hLbldq4EedTUo85TokDwsjd097
|
||||
eumzAST+Z4+UG52TKauMHWzMo0H35OMm4xbVDvgqcrOVjefcz2rRMBjwCdJrI++T6ahmLrDlQCSa
|
||||
jk0wJV2aw0fNBBoes8CeuZC68HU7ONh1mFHzTUBS0OTHB/aNGHIaFhsf8KpRaDijMhvSzfTS6bTf
|
||||
UP9tmpyvbx8DEAk9yHyXp55tam7Au1fvEEcTS6b9TjjD+GI9qNN2fsSuz8nRN+g8I7463uvpeLoV
|
||||
QHaVC93o5a1na/9YQsaeDJsWl/s3Ci8VvHhWSSZ7VOx+/9UEeFppBY3Dk5S0Htc/aviUR2r2b5pN
|
||||
SNGOoLt0dxyX78yenaCNgQX98JffkunHCzznoVNUJKyeza7UtMcw+9heNUPPr16fw2ZOXaIv8U8u
|
||||
XWdAL64INcSK9UzfyS0c7/bzTz0m5uX+0mZ0A6i/7WlCHtZ2AtJpPiGBbvys3xWjBtVDhLG9md58
|
||||
mnMTatXKlMinMaSeFcx2QP/UZEJ0eQ84U6EP/MNbpOhQl9HUZ08Ed95Rx57thf0QllgGxzPzicJm
|
||||
MRt2W4PBi+BkSFlXfs1wPLVQGD4rHK81Kxni9KzBxf8TeZRwvehXBnPxsaLh5jr19GzjM7zE+pWa
|
||||
s7fKyDKfEPf7J5Kf9SMa4RnKv+uJKNxgNF9WTw0aF6lH7AoUm0SvbILqhA1sb41TxPo++8DVeYqp
|
||||
4zTrZEx4XMB9dB2p+xRJ1JK6IvClbxmOj+tPTe7fgMH+oZTUlFKSMHfyHXhbnwUCLDwks7ntWnBp
|
||||
3D01u0sMRty+LfA5vS3EdmCsyR2FueavSLT4ZbNfb4tTqSz+n/BlPJNuxiFw4sMZL7/nnLvmDm6q
|
||||
S07DMv/W38rtFK12Njn1l/mbtS8ToXSW9xQNUsM5vk1H+BhFB7sBiyLexMoAbXdDKLKf735Ui3GC
|
||||
J29+UzvcbaPueewKIPrkRn56Vlr0DpiKs0TjXjWANM3sA4a1DhADj7qecy61MLB6beEjx2jC6XsH
|
||||
axBccLw9xxlXilOrlSfpir7nW15Pd82JoVc+r+Tb623PfCBDmNnpDf/qN4uMxNcXHoff+e2dTZf9
|
||||
xYGkjkO8q59twsykNAA9bjscdlBLPsDXHRCHuo23cR6ChfeIsI0uHZHXbmATBw8f2GT7msz7Uwc4
|
||||
GQvyq7803m70jJUED+DImY5aa48ANU4HAsWGYmztwNh3jaIVULwRirp0c6jZcZMaEM7Vkdr6KYwm
|
||||
KS4d6MS7M3Zq1YikHRMduMU2R9LCP+bgezD0JV9j98SHmptqcoZj4d5pqKEvGO8v+wxRHu9x5Dxo
|
||||
NBiX+w0+bTUk6vMcR/PlcZbhwg+QdB6JPUssS+Fl7bbYcZpLMtrmoMDZuO7JkHl9xHkj+OCcqwm1
|
||||
nrmYzHcjJNA/NCK2PzaNhnX+HWBlWAO1PvrU89zjMvSe7Yl6+LNP6A0PTIuhs6FBcAxsktyjFsYb
|
||||
I/yTTxZe5MPBFXIkL36f5aZ+g92m93GoFGM91NV808V56qnn+mYvi74TgqXeoVlKUdLvLNJqi58l
|
||||
8iS3PXN7eAa17dtE8YqqnuSpJHByErrEv2RPz/ByAAtPoY6srsCv/ursVOlIyrZJwg8r8wzXiX6g
|
||||
W3El9mNIvNcfPxmXqQOY4R0E+IT6jUYsYcn4Os4yTD7DhXo02tUT9fgLjJ2j08BNmE3zGoSgsN4y
|
||||
uRwCO2KfQkDAzfPPH741yNOHQBhcc2qbu6pnH8/6QBREBnX7jVfzj68fwWPPOmx4wttmb00qQdpu
|
||||
JqTuToY9ty+Yq7/nQeLZsmdvXMd/9FXMAxJ9tvGUwkW/YXup/7wZmtcfvW+HuzEaoaYX8J63Mo21
|
||||
b5gN22E8gISSGGlVVtgT2PoyPF05pBs/DyN5WH8GbeGhhL3iPOPkVPvQc+46DsNrFhHkv0Tt6UCX
|
||||
Rmi3BRreteGPP5D++cx7OpmjD043UfnxkHrRWwyoWXDF/mxe7FkVJhGqBH2p2zZWxq731xmA6+VM
|
||||
+rES7GHrD1+I+GFLury58ElVWAqtzlWxu3aDaDbHIv/5LTK/1l7Np82kQaXfJKgZHqgWHyzbgS8V
|
||||
bzhuUpz9qa+Xjf7ClkrbhCpVdwQLf0K7lxnxacRDBaydQynKDqCmKHyU8NQqR3zZ+TIf7lX6BaA6
|
||||
PWngizX/6QVt8bf0x48Yf6wJpORAcbjkJ35XzBKe7xAT5nR3ezqZxxwyUnt0+xGSaPIHp/zxj5+f
|
||||
W/SF0sIlnvCSz+uprQMGVRWH9Ocvh9/vFz9L/RvdJ5zMVaE9PFpR3xpRMqVNU/z0wR9/wGpUI31o
|
||||
jC3eBhnn/WjmDCRN59KIvcxkXYC1CxZeiLGz/gDS4Qj+0Z8/f0p6L7N+/gE7tDvUa98N0B9+Dhbe
|
||||
xkDOd1A89ALiJX9ytlO0En7JXOBtlJd2N+4vMTCJlZB5FYwRi+NPDMxcv+CtYkQ1P5xFCHVQ69vx
|
||||
NL4S+tNTthyJ1P2Kaf/jyfCn3xY9m3D9rBtwupwMokisz+YqfRi/90t/8SR90jWDvjyYSDeyLvlT
|
||||
r4r4A6n7kppIjv28gIdr/cLewj/ENXy02lKfsJvWYj9oOXCgncTuwuPzhFmgQMD7tg8iLXxv8dcu
|
||||
LFvlhDPhyDkFlUrAQWoMujxvRBf++PMf1IIPlEzXraqp37u8oebGvmbz5x6VkGltTa2Fh/PTaj+o
|
||||
96Yxqa0dE5srQvmFQ73aYzRLu2ziU3oGD0V+IVYMcbYm87eAe3Dn2L4emp49rC0DxZnYePN+2Hye
|
||||
9rSFQ2Nt0Wu3fgO2jssDlGsQ/3hD9vuuW5J/onvu9/3LVQIEriJ+E1hdlGS6zloJM604YVc6R3zp
|
||||
5xxBEjRf6qfoASac0gP48U5r25cJ78T14cdDFz+2ylj/xppWfpQvDqbUqb/xcHfAwm+wC9trNDIW
|
||||
Gb/x0+hUAnvhJ62+PlmI+nS6J8y13wJw3w9/WU+fbOFv3x+PoeEDbuzRDE4iXHg7kh7CuydAYS9I
|
||||
k8NuWQ9zQs/1IdW51AVIGKQNWCdeSKAl+A8kXt60p99ma4Fh0DOMn+o5m41NedDDmLs0ODRVL10z
|
||||
9QPryNtj91TpPSu/t1Bb+AdS3uYTfBR/IKC57xX6p58npS6DH/s1Yke45dHkH1MBat9zTO3t/mVT
|
||||
cVzl/+YxLHSSWXT3OVQ27hHjR0CyGQnKBD3ptSKc8DNguRv4v/pD74+itnkpuiGg2jkik4s6zrw8
|
||||
dOESr0Ruj0kyrqsuhPtTLv7pB5Kmml+6nQVHUqZGmLBCc25QTeKIFqXoJLMUYgu8ttHwh5+w0+n2
|
||||
hegYlNRRe6MWB1C46o9HO216jYYHun1hqgYmTRGhoGt9PYSIBivy61dM5uM8wG2/wRQVL7MWnZrJ
|
||||
8Ncf88D9aX8vXWeBxQ9iV7lUYHqwZKeFD/lC2uFsZrKQrjSwnXyZWqfhyakilB/dnUuMLVM9RcVT
|
||||
y5m29IOwZYVO1O3rQIQXZJ0W3uJxVrDIhSImGNFHQBJ+7QcHaqdD+6ffwQ+r4PyLH2z4wszJUyjL
|
||||
f/OM9vvtyVLf/qyXepl/vvR7VPw9+0t96expBrIIowEckHCsOKD4PLSwgKNFJlq9wbQ26pu+5Ed6
|
||||
iEJSTyLEMSxnRSL6yqyW/m54hMYA9ji0FAHMVXox4HMwTPrjYwyktIK2F36xueRHkp5qC/ye/7D7
|
||||
Cv34y29SwR+E9iHlJPBjFz6E+Y6Reh5qIh33CAgzacn+1So90beBBR8uC/CmFA7ZzJB1g0aqvPHx
|
||||
4Udg0UM7ffk/BLfnIWPyK5PVxU9RLAxjPYcsOf74GrYlcQRsp7Dql//+9GP+9GcOq49LvaxjYBbW
|
||||
uwN8btQH3ZzzlT2N+FXBCGt3vOgBzk9gA2FTXkIceh23J3XXWnCqLiWS3PidMHP9+cIfD9BPo5OJ
|
||||
q9OtBE14DMlkEqeX8aQiKNy+XyS8k3vU78O3BnfGxadGVjqAaP6ggIU/oq44SfaMzsMNPgfLpIaL
|
||||
AsD9QGDg11/96ddfvw/+/dsV8J//+Ouv//PbYdB+7kWzbAwYi3n8539tFfjn7X77pyjK/6Tyn50I
|
||||
ZLiVxd//+vcmhL+7/tN24/8dP6/iPfz9r7+UP7sN/h4/4635b4f/sdzrP//x/wAAAP//AwC2t0TP
|
||||
4SAAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c0b9867da38f9cc-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 01 May 2023 22:52:31 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '20'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '2999'
|
||||
x-ratelimit-reset-requests:
|
||||
- 20ms
|
||||
x-request-id:
|
||||
- 25535161a3c6c4c4d2d95830fc85649a
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"input": [[18031, 1495, 220, 16]], "model": "text-embedding-ada-002",
|
||||
"encoding_format": "base64"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '99'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/embeddings
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x
|
||||
ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0
|
||||
Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071
|
||||
gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq
|
||||
P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl
|
||||
ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0
|
||||
HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN
|
||||
SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5
|
||||
yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M
|
||||
G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf
|
||||
+fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI
|
||||
dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv
|
||||
pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA
|
||||
S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF
|
||||
o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx
|
||||
WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0
|
||||
jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt
|
||||
fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp
|
||||
3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG
|
||||
MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB
|
||||
/v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0
|
||||
DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK
|
||||
fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps
|
||||
HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz
|
||||
mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd
|
||||
PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl
|
||||
zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN
|
||||
nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7
|
||||
6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW
|
||||
tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep
|
||||
aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj
|
||||
7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB
|
||||
jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu
|
||||
KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM
|
||||
/uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7
|
||||
zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO
|
||||
fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB
|
||||
vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k
|
||||
VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD
|
||||
GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U
|
||||
fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU
|
||||
p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai
|
||||
cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8
|
||||
Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP
|
||||
ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi
|
||||
MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf
|
||||
uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg
|
||||
CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC
|
||||
tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX
|
||||
baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ
|
||||
qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ
|
||||
ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf
|
||||
zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq
|
||||
pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89
|
||||
nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB
|
||||
4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr
|
||||
Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/
|
||||
l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt
|
||||
NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f
|
||||
bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8
|
||||
jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl
|
||||
74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK
|
||||
1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY
|
||||
mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy
|
||||
rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn
|
||||
eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn
|
||||
IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq
|
||||
uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71
|
||||
j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst
|
||||
6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K
|
||||
H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19
|
||||
/awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl
|
||||
s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f
|
||||
WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK
|
||||
mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ
|
||||
7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk
|
||||
tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju
|
||||
TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO
|
||||
urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M
|
||||
dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C
|
||||
b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G
|
||||
5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF
|
||||
bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f
|
||||
ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc
|
||||
614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e
|
||||
mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86
|
||||
iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ
|
||||
G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR
|
||||
aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO
|
||||
mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm
|
||||
AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M
|
||||
H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr
|
||||
T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz
|
||||
iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4
|
||||
5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj
|
||||
l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81
|
||||
NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k
|
||||
Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf
|
||||
D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii
|
||||
7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh
|
||||
JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3
|
||||
01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy
|
||||
4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H
|
||||
BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T
|
||||
bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n
|
||||
4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+
|
||||
6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ
|
||||
QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2
|
||||
4SAAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c0bc85bcee4253c-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 01 May 2023 23:25:16 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '1067'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '2999'
|
||||
x-ratelimit-reset-requests:
|
||||
- 20ms
|
||||
x-request-id:
|
||||
- 482e97acc620bbf5e2f7d3dd44145666
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1

@@ -332,4 +332,512 @@ interactions:
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"thoughts\", \"reasoning\":
\"reasoning\", \"plan\": \"plan\", \"criticism\": \"criticism\", \"speak\":
\"speak\" }, \"command\": { \"name\": \"google\", \"args\":
{ \"query\": \"google_query\" } } } Result: None Human
Feedback:Command Result: Important Information."], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '483'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6WxOyurbl+/4Vq9YrvUtEJWG9IfeLJihesKurCxC5iSCQBHLq/Pcu/E6d7n7x
|
||||
AfFCMuaYY4yZ//jXX3/93SZVlo5///PX3+9yGP/+H8u1ZzzGf//z1//8119//fXXf/xe/787sybJ
|
||||
ns/yk/9u/71Zfp7Z9Pc/f4n/feX/3vTPX3/fopdD0flZVNPUPC8yT+sT3gt8pY/uh6XKIzl8CTzP
|
||||
74Rhggh4m3hNHTU6VhN7kC18XyqDotgb+zmfIhk+DKen3kq1uXg3HwJceS8R25JtA15/gkgZ7UnA
|
||||
TptGFXfomsGpRFfq7XDr94G9VxVzjTbUY1RLlu+XQXJ53LE261NCuyg4w6QiDZHiXKrKTRMg+KJl
|
||||
RuBWrkPyiQQGU2j55ASHhnM1HnIomOyK/UP46VlvDDGM50TFfqDyiqUNOyuv9Lahhye6hzPnWQTX
|
||||
0DngC2N2zxHqDMiu8I6RNGxBvy4uKRxflxRn7lr0yfd0G6D9kXSMwuTT8yG7s13IOgHbO5FzPiY1
|
||||
gUPXrSiGmtF3X3RSYWolBLEi6qohbVigrDwhJErOdX924KlWtIgcqepU34SIL3sLt1W6RYA/hpBd
|
||||
XlYN7qJ2JXxb0rAtJOBAr1hv0EbURZ88Qa3CO68k7Hjw40+nMUeKEphbspncgg9QLSM4xqs3Vldq
|
||||
2Q+N4auwuBY11W3fqDYwfjLZQOOZ7vX+0k8nZT9AZH0YdYzjPpl1ZEiwdpQNUjYbLZwf5gZBr0EE
|
||||
yXfb1ScRHBFEb4QQUJTGZ6etZ0E8ZQeq7nDJSSh3AVTF3UR1cQQ9f/Rghrt9s8HqIbgl3dGzIazi
|
||||
zZaqvQjC6fM+11AAnUH9G90l8yRLGdRbsSJSo0nhDx874yTL2DtEVcjelmDJT1utcDaLec/ZfmvA
|
||||
q9po1EykIZnDzhRhfMgDnJovE4xm6NTQEsIj+azevJpv+HSAard6oM333erTls0GWGnUwK7DN9W4
|
||||
G0wLNifhjI3tM+x5FRdQcTV5Qq0ajRV7R48cesn5RtYAv8NJj9sWyhZbY0MqPsm8CzADv/U/lPtJ
|
||||
58q9HGQeHWO0RpHaSyvCNeW9eiUYqXcnmd9lxIAZrwKkMA2B6f18OHA9SAoBtI56LhFPlsVTOC37
|
||||
U4SDaHcGkG+WgS1SQp9kzrlUhq5fYZWlnc+d/BLAfZhjanrVwOe5oRA+SHTCqLmvknnclNqqt8SC
|
||||
PrEE+Dhf1Bk2gD6wE+GGz1BKLEj6JsbHfez5bGUypnjl7Ua9ZwX0Sex7AhJpeyTysy8BewRWBpUU
|
||||
vwhJd2FPuDHJ8Ni+Juyvdmd/krtYkyV7ulJNqO468UPYwMo4R3SfayEgD3vnycA/a9R7H7Vqs5dg
|
||||
Bm/9l1L9/IbVpE4lgsVoaTjQFKlqjuMjg7f0nqMdqu/JFL/cFFTxekujyH75v/oGIJ4q6th2mHDz
|
||||
+4kg/OgFKRa804MxEvg9zh49qmmt840Xy2B0zk9qW49rwkmbOFAJ6js2d/6pkqxSJNAs9YAoJ6r3
|
||||
kzp9EVhfZwGrRdT1k51oGiw0KaWOcDcr8aoEKVzwgrVCf3N2TcNSMYm3wQi1Rz6JwERwT9071a/p
|
||||
N6GMpY2MOqPBL1xs9bFwijOM5Rbj+4m1nKNMa2C9P9rk/TG3Pnt9DwN0b1ZEfXx89Pw5PD14LjSb
|
||||
yDa79mx3r6Bsf0Qd8chnCZ3dqYSZc3FJMSeFzp6lHgAxkjC1nncjHGCgy5CBtUXtjXnkE1lFIjSb
|
||||
yMbJBuX6JB7aARhXfiav/vjWp1XHY8ir9kQtfPLAdNzwmyy9wQXJFLbJYH0DWV7qjXqm2PXvfDgF
|
||||
UIZ9jTjTuoocTm4OdkOFyMZ4dsngzucblDo+4MN12FSD7YYQipvvih78wOxJJq1UmCm5TdXhc03m
|
||||
/KWk8NrnD2qiSK1Y9Vx9wRoRAXt3u/PnPAgh9ByLUkshVTIJxz6TrbI1sakIfcIOm5xAaY0qatw8
|
||||
J2HnS2wASyqf1Oi9kPPrVhXhFHfSgvfE5/k1rJXUehBqrjOz38jn9Ca3uxWhe20kFWvRqoRSLECs
|
||||
CyHWqcwfZ+iO3p2sG/us98q9JPC7YgVF/u7DZ8N/zMBxpZii4/fDl342w+YxaljXbwFgZeh7snLJ
|
||||
KMY38wQ2xf5ogU14KxEzh7QfLRqVcH1lAjZipfQndJPiHx4w/kSnkHu7yVHgtNphu2GXZFY/6gVe
|
||||
lCimzjXPOVvpkgh2R5LRvQwswPZP14Af/J6wrl7uOnNuWgzkx0vBfgfUipw0N4LwYLvYzldiz/N6
|
||||
bUH3ARn1TXoE8+68J4pyjCk2Nd32e+IGDsw3jzeNEncLSK7OKRDM+UpxbyrJV+G5BovhXWEru36T
|
||||
WX1bMTQPTo5NfT/qfE4tA0p3o6aHVef5ZK8HN3jt9zLe36ioT+uWfeGzEABGluPobGKrA6wvMcH7
|
||||
9GqD4RX3B3BzvxE+HmXK5yGJLVlzYwEfsVD1VC4UCU6NMWLrLEh85CwgCtiCErvtgVWt8662CkPy
|
||||
HW220hGw2yNP4b19Z+RtstDnJWEZHJPMoOb82XDii1MOtnPYkt0pVMJB6FoGX5K9wobmlLwvj6AE
|
||||
76AdcLgtcULWaSrCQGq93//3OV3vG8iFuqX7jvOeDF/ewMIaRGp06APGBDxi+MkOGGuf2e8JausU
|
||||
quQWYLyJJT6sdEEEXzMH+IilBLBG7FKoSdIB47TzK/4ZX7J8bJ8T3UvOUZ/XIzsr95paGEXWA/Dp
|
||||
UNfwZGYyVY8oDbnnngcZefCD1dqmVafvqhaEN2Ri119PITkmkgZHVUfY2vf3ZNA+6xo4JtTwq6qU
|
||||
P3gBjzL40iDuqorI2daD/WnekTk7RDrHj2sDz+28IcL+SkJO7NsBiunwodqqqwFd9AJY+IKsiHzS
|
||||
6VtcCZAe64nMq67m/VWytsr3yDycLHphoNtXAOyAGdT8mJE+m1Y9K5aRW/R4j5Jq4UNBeU6Bt/QD
|
||||
Ho5XJrYQN/qVwPbtcdFD3xvklTuTebN6huyqRCmkFVSp8RxafT6dShk8DK/HnpRrfp9LQQmOg9hS
|
||||
zXicQbeK7hDGssvQSrXVZD3Z8Q1UzZliJznlPW9ldIH6cevTxM+PlfTZgQFe4l1NUZ2EIa3iQgC9
|
||||
9vwifo9uIV+d1wN0+LukjisUPnXy9AzyTfJGz12rhRsv+Zwh/5QddS3jVJEvbRtwFY4Fja/6OpkN
|
||||
/8RgkcOEquWN9ryYqhKa9tSRKcovPquemy/YyuWGuhp+Av7a6Q0kJj5QdKhjn0MQE3AKkvOfz7M5
|
||||
K1vF3mdrJFbtxR/vMtOU7bFxkLD4iW9OMwi6NKsRsPsdmL5wXYI6KG/YfzQfn83K+6vUczki/s4o
|
||||
6OazJcK7NlBq4Ab1FMZXBndPvCK7avAq3s/NGQYX94JE1DoVHQ86gnorVWitKrtw8gwnBZMInngv
|
||||
tGdAUeY10E6TA2EGkXpuRbEE1fLQIUF/lDqtvnomE9euETDpkfO19kiBgI4SNaIdCZlyDCQ4Cv6B
|
||||
VGveJQufDUr9MSVq80DW5wM75NB9v3dk/epVf1P5TgYPfaVR1RxgNY1zGUH3jFTyDty8mstdfIEn
|
||||
93omKVKmnt22fQZKH+3JdtFXvVa+BTB8nYyej6smIQLeavBPP86uXjgDr/hC13dU+uPb8Zq1FsRM
|
||||
GSl6rorkexxPKWisKqF+EYZ6W9HEgJ8zf2C0PC/fWzcV5quviPXvye03fcAOskbnA6psHQL+rQz2
|
||||
01dYj7Ghc7ZnhrL0M7wvnKKnWfc1YBz5D4zI3uK8liYZuhZ3yXolbxKiWnoEvcm3iLTxumoQdt4Z
|
||||
yKh4Yzu9lH1XWp8bXNYXY30/h81aOTEQnqsV9fy3m7CWwgyGxRrhoxYFPqnfvgiFDRvoLclg9X1d
|
||||
VajYwWwgmB2UhFy1coaege5k3uzvCdOvnQrXloCwxa9rPudTICvZ95KjMeiuYMr0eAupOzywfau0
|
||||
iknnaw1HmwuLf+l0XpJtKluqi7HabWky3Qa73Sr0YVGNXvqEi+tSgEUtcuq/HQu0YeGUMKmGhh4K
|
||||
3dene3epYXnzHti5ZRYQv+4pVqbGGolSwHVS39csUqrkcscnMxWr+Tg+Ujjf43B5XqYv+BTBhMeM
|
||||
rJ3XQ59zeoNQf1QXtE1Oas/w6uvB8+PVEvDMViEr3i8ChgJgqn7cLuS1laSy3PID9jZZ69Ov4c7A
|
||||
V7MrPSq5XI07QBoonk4T9fvNoZrCyzAAKNg5PeZc1zfppDkQIWmF1lc5D6crg1+QEbTFqBiO4eB/
|
||||
5zO8CoOK789zW01FpLGfHqUvTR9ClkonBuWnJJLVTGvA3NMzgsAPNDQnZ5NvhlH1lMva9ala27if
|
||||
b8U4w5dwRxhd0L6X/HlVA/O9a7G+y9fJbOf7r3w9AICNXfKouAtrBMPYelJPUxJAgEMH8A6+w8/v
|
||||
6iTqOwd2VVkTZR9//blB3hl0fXghEzJKfURrJ4DgBnwiJsDRx/M2HaBFFIEiDxSgrZNDA/eVzjC+
|
||||
2j5Y+9ktg6e03KGJ3ceEX5sxAj2fAuqxj9yzraeUYNH/2L9OPBykYmWAvhZybNpiwydDlFoIJmem
|
||||
etFBfXxePxl8SeYK+9tV7lN19ylhaW08/OO7yQsLAQqrIqWaH+Qhf+ttJL+PT4NmdcLDrjWuLZS3
|
||||
VoL3F+74U1R/Gnh+Si/qnkIlIdnnWcLr3DywRm5myNMi/8Ky4CX2x/ZQzcXQbyEP0i8+2n7Haez4
|
||||
M4Ty0aHGuioSlsR7SdH2X0Td3SvQKUKFofz46YteST9rdD8r7vuzI1BvMp+l0mOG2zTe0r0VxxWr
|
||||
/d6Ac7MTqd19Sp+VetEqb34XqRraFZ+f/rGBw9qQ8D7UHH/hx/inf5d6aQBrg10GR8E9/MmP6PMY
|
||||
e5BI+Uz44vfyzctQ4Qd/JrLkJ/1M2R4CdR0Fv/WrJrR2znCpL6qNwaYf7N0cw0uZ6Wi3ewX+FJyC
|
||||
M2wQ+yJZUxI+tbOrwsW/UJfv0pD89OAFKweyWfKb2V4ZX7Dw4+In8op9wroEBtycSOevp4QDUgfw
|
||||
tK9LqhaRWzEY+FvI0PZOnyda9VMVPmdwRlWAJNq6YP20TAFKxKko3ptW8k2FlwX53k/QuoNvTsHL
|
||||
Y3CXJx8EorbwGWbqF4p7FuDjwh+0e68kqCZOR7PFH9DKRTl8m+RC91apJZLjAwdepWzCtv7FiYhX
|
||||
XwfGezPG1Pk0/vAgwUVxt1OOzZ9eDbMKwuzu7LAty/uKH9aGB8LCe1L19XH7GblFCpf+SJ01jXzy
|
||||
MFcHeHkFCdmF98mfrmqG/uh999D6yXrlbSOwZtobbR/HAYys125Qr6GPo6P0SHiIvQyGt4NJ1be5
|
||||
rsgNPw5gdZV7ii2p8VnQJy1Q3PJOVoPw9kdlSjLgPIcZ+3TIQ17sTQNOZ2CRpf/3w7e9xPB2LBlp
|
||||
9fiTzAA4Bvwo6mrJD8uEb+/uDdqCrJFNFqCKb7qrDFfaaGBLp76+IRuxVcJzsSJzzU5g0eslgNKF
|
||||
Ud3WU8DKcmjBJXTEBY8fzk5t3gBzZ2P882stuxWHH38TLo5JP2fSSgMfBiIEPe8O6OG298BJmnMC
|
||||
+09djY8ClnCvXlp6cdeIr6P6U//05sLXjr7x3DOB7pbnJFj0Qz8pvgZ+eaDdFmLCrY94UWjAHtiL
|
||||
tq4+tcazBbt6zqjx2FySce2RGTBtZtRY+I1rM07Bc6fJSIi9Y8Wcdy/DpX6R/D5qPTtfzobyW8/5
|
||||
TTacdclbgB7evAgrb7iiJWEpzMhhS8JtSZORZKEMxWcSoQ/Y2n2VFnkLzVgJ6GEKIKCwW523Gyf2
|
||||
fn7Cn9/B+gYvShxjx9Vf/XSXmQqgmu2xO6y+Cdsn2IPkgyuqM83tZydJ/2v/+cOVwkV/WErIeoFc
|
||||
jXn02TYNINxevg6CU1RXk7wFOXgFOCDi4qemqClyeAR9hC3adqDwwk6A+BsfsHk2h55stgMDzenF
|
||||
MarY3herIDDgq28tmsnFoVry7QHUqZtR20OdPifqyVA0ehoJS98YTMP6mQGLrAR0nboTEPW3V+6e
|
||||
O1XG6g5rnMBm10Dn+qqoeTYPPVj6Mdwk5hmbxjzqTUKUGrKrcEfwzvpkxNpHhhGCA6lNbe0Pevgx
|
||||
4LGFAdWliSZTs/l4MBqnlCIlOFd9/EEzIJcyIehdFPq8P+9EyE6fHCORDWDG710E05P4xv5bfenf
|
||||
X5658DkR7Z1WLev7BdZF/1DD2bSAd5vkDIon41hzD0PFSTS1ytKPiOhsWj5/ImkGwcW/0IPeCDqf
|
||||
xiJXyt1Fxd7FxOG08li8e1XRhp7bpwLI5TYFUEubN9aDzEho+bxqsO6zkShouwaMNRqBi/6nP7wv
|
||||
eaMKNcoOCBQh19n67jVQSY8vNITgXv34E17lq4s976Mns7nREfzVozU0gS9mQnIAPz1gy/ODj5E5
|
||||
NPIzs0oyPZqCT6SwIOi3no3y/Aw4rT9BrAD7Df/U2+K/blsihCWiggs5D7NegCczlbHpvhveCccq
|
||||
hc5GeGC8+DeedaUFsyZzyIL/amMIyhZsmtz95U096y/m9sc32MfXIpzNmxXALmoOhOAm5Fy8EA1Y
|
||||
nyNa8Ff47PZoU6Aaxhcb4LLXf35febxsjYjNa883lYtKeAtIROZKufNf3gTAbefjfbPbJ5K9OrRQ
|
||||
aJ9P6q92s88jQkR5yQvw8R6BaoLnIIP4EByxrRQvPgHzq0Hqkgda3/W5X28upacseTJ2dvViR+s4
|
||||
Anh9qbEZiXNPkXKsf/MOsi15Hs6K7jagTTyBVBAn4ThqPpRHIzKJuORl618e4e2iKwFLvstPpm1A
|
||||
4po1mvevvGdsPRx+9T72XrwCVMqKCJYjcKn2RLM+7tK+hukTHn75lT7fM5lAK6lNjM+RCUTMnC8M
|
||||
+mOI1den63nQHLRfP8JPciI//MdgiDcYrQwlq9jZ2DZQ2Z8cNP346LtXEOzcs4I9MSXhSFaBBC5f
|
||||
xKl/2+x1vpGnG7gGZkXNtLNC8XZv8p+/wE5kr/xh28sZlAstxX7t5aD9uo8Ymm/QUj22YciNobvJ
|
||||
y/NgVZEvYH7nmQRHIzaxdcAnf7qcpi/45W+4jP2EqS/owB24CtQ+grXfit3dAc6o7ukyvwIUf3ca
|
||||
XPQD1cV1Fc7+O9LgE/GQ7Epi9O/Uk6FsPLov2XkbPxlfwijJhgVLmmrt1v+jhy93/UAWfusnn+5k
|
||||
uBeyBu/v6z1YL3iQf/m0q2EF8LRov9A5fXw0XMip3wj65QB3/IPQz9/x80nJ5R9fRsFo6zzEWgbt
|
||||
z70m4rJf7G1JFqw/toRdcbglE0FyDJEnfOiiRxPuufEA3+CjU5X0nc5uzlEFnJ4e2JjI2LPEIRE0
|
||||
toGz5CmlTw77XFV0UPXUaF4Fnwxp28C2sW8Ur7Mvnw33LsFfvnPrNl99qsTzAOzPtcY6vFf6/KrL
|
||||
VFnyanr81hqXztnppqDMQfSy9Asaf6wZtmGE/sz/eHx/n5W2RALWssNWn9nhAQFODAUJs5hXjN0K
|
||||
pHi7+EqPxiVNxtXuIsMXzTMkZus4XPqTAIVnZaJ5MvtwEgaSysVoaPiw8P3PH8BcEHTsxP4ZzOte
|
||||
uoFJcQZqvB/in3xFLnc3lTqfTqhm81MMP/7A2Pk0+h9/u/Q/spXGTG8Pm/bPfA6j74H7LdCfGSw1
|
||||
YUYf67EOJ+MKWji/fJ2i5v5KFjyKwK/8J5mcsEx++wUWvqUH+GHh3IVRC08SywkQwMFn1WtqYNKe
|
||||
KD7KTRvyylfTP/MziyVLPOCPX+iIxp5Gy/xrWPIjeNnlGtUuyTYca/oeYHTTBvQVtAIsec0ByJr8
|
||||
xD+8MamaNFCez3uKHodPz9GBer/8Eu1OyVuf78qaAHHTrvAprm1Oln6m7JPzCmOAvWpCdirDjb3r
|
||||
qbbot0XfB7DP3x3VJrNPFr5BP71O/Vp76AR09Re2hxvCep13YB5cqVWM2HOxpykADPpby8GSj5Lx
|
||||
KFPQGaLQ/vILROvbvR9Xu1SG7/39iarFn/LAdlWIDU2gP30+uRYo5fal38gUqCScwNUv5V0gW9Sp
|
||||
6zxc+t8AHFPQsJnf5nAUSB4rzOoc6l68Xh/HSpWhDARO7cndg01EGhGyUbthLQeZz1dq58GVSBOy
|
||||
zFNC+tuPxf+h0yOTdfYaNjNcdcYZe1tXDtn+ubeUhhknetklu/4rnvILNA9eTo3mLvvvG9k3yt+/
|
||||
UwH/+a+//vpfvxMGTfvM3svBgDGbxn//91GBf8fP+N+iKP2bSn9OIpAhzrO///mvQwh/d33bdOP/
|
||||
Hts6+wx///OXIv85bvD32I7x+/+9/q/l1/7zX/8HAAD//wMAPe/DkOMgAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7bfdedf56918cfbc-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 30 Apr 2023 07:04:15 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '25'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '2999'
|
||||
x-ratelimit-reset-requests:
|
||||
- 20ms
|
||||
x-request-id:
|
||||
- f2850337441001a8e5bd70bacb4dede8
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"input": [[72803, 18321, 25, 314, 257, 330, 61665, 82, 794, 314, 260,
|
||||
330, 1342, 794, 330, 61665, 82, 498, 260, 330, 20489, 287, 794, 330, 20489,
|
||||
287, 498, 260, 330, 10609, 794, 330, 10609, 498, 260, 330, 38096, 42914, 794,
|
||||
330, 38096, 42914, 498, 260, 330, 82, 23635, 794, 330, 82, 23635, 1, 257, 2529,
|
||||
257, 330, 5749, 794, 314, 260, 330, 609, 794, 330, 17943, 498, 260, 330, 2164,
|
||||
794, 314, 1835, 330, 1663, 794, 330, 17943, 5857, 1, 260, 335, 257, 335, 335,
|
||||
5832, 25, 2290, 11344, 37957, 25, 4153, 5832, 25, 44921, 8245, 13]], "model":
|
||||
"text-embedding-ada-002", "encoding_format": "base64"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '594'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/embeddings
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SaWw+6SrPm799PsbJunTcCKl2sO04iB+1G8ICTyQQ8cFDOdAO9s7/7RP87e2Zu
|
||||
TMQOkqLqqad+3f/xr7/++rtOiud9+Pufv/7+5P3w9//4XnvEQ/z3P3/9z3/99ddff/3H7/P/W/ks
|
||||
k+fjkVfpb/nvx7x6PKe///lL+O8r/3fRP3/93fe7HdujKCtm9AaQH459JDpOlmEvSuldKTb3hq4y
|
||||
9EnGJcUlek0rkR107xBO5h6vIbBDk1n1aujmt2ovYMrrgXkvfcdXzeomQ4hDgdhOt+um6pZGCuyU
|
||||
BTm4LOLjeNqO0M6XM9NKpUS1p2aqIhT5ihmvte7xp7SQ0USOV6KdmykZykC9QPFsK8rTi1583uWI
|
||||
IYufKZUDUnBa3+gI0C5KeuJSyadFvr9D5/lnYsJcdZNT9xfwK2lJto7AQ74U/UDBMV0xw3hdjTkX
|
||||
cAR4Ye5J9H1eHj9uJjzR/UoscbNJmsPSTCH11ndyDkaOaOmWNXzeF4PYRK66sehif8NOtwXBgsL5
|
||||
rJ2EFqARl8yL37uumc4pwFHiI15eu9ZgXqX6iolpSJUjNbxJuWlvRTeeB7aPk84bLuJOAC4JAt5U
|
||||
6z6cpEAS0DdeVOldxrvD6NmQdQcZi6el4A2rSDAh9D2JeOGq8qZvfJTQd6RvvPJiiN5zBLp+Lcnh
|
||||
qhQevTAXQADxzXaX0SxWIjxOco6OAdur3tmbPEmrf8/LrMtG83i/E2Q4GZs1/sV/ctqdC6VVThiN
|
||||
smNMt/KBoXZjDy/cdenNu7XsgmjJe+Ys7LwYVsjxoRs2IztICkrmzyeZAWvziliefEHNFao1nM58
|
||||
w1yao2I+yPkdAu2wZYew2ySj40tPGEVoKdxHqZjmh6bLZBfLxN2ZhTHKPr7IaOEX5F4c02QcPmsV
|
||||
PsFTZ2SZ9Al3F+IITSv45JpgEw1NXaewyQqH1hHwYjx0kwmLsoqx6Cm1MQPOffRslgYxcLvibPqI
|
||||
LrTbNiD2ywu7WdpPoNykmeNGDIZwPEebFDovOFN5Mj7FuG6iGmK7Foh3saqEj6/lGiXX6kN2NZ2M
|
||||
0SFhLU9IiTE3QrUTptjQlfmxSggeb3Yyyqd6RPmu8PFY7XE3V/PGBH1VAlX0Y9RN92tbyuvja2JO
|
||||
TzPObueNikoLm8Q2kdIx9aM/FefxWpLt1a7RJHtvH6Rn5DMj1Hs+L24Eft9/9Z7M67WuL+kFMnYt
|
||||
d4jT7ODP8LGTmJiNUxZzFCcWbF5BTDxFcrppWKq9Yjb5me18IvNJdNAC1SsTU4k/8+7P+3KV4kpr
|
||||
MTt7rAkzGQK7m8gObY7dfJDbpzy+Hxfm0C4y+vjUl/BEzyvDghIiSsF5yp8u1r/5oxerTwxPWDoL
|
||||
xrAtQjGdcYBhf8A6uSTbVVF+18N+Yi8sBdU1mZnRvNGAtDWLCvbyJvZZRCjZDu8/95/5uTpBPRgl
|
||||
/ZS3E+rH02EG7F5cZhru2+CW20poI1t3RiJyRuO8TWx4ClFEtGR9DFft/T1DjitCuYSNbl6vXR1J
|
||||
Y7v8xqfppmOS65DqiztzJ2NbSImX3mFfxxPRjtsP5+G5yBVto6+J+vSxwQVBDGCKDhHbNmrr0fm0
|
||||
z2VooSSX+rUu2MPXAiDtSEj01hqDa/acQ3o/OjS1n+uEt/d+hMCZI6amn1s3730Fwz5dbOlGf5w7
|
||||
blznWt4f9joWHIWhwa60HNTW3NEmKzJj+jBuIpGXhBmATIPdrqEEa0G0mCVybPBzYQvAy/uO3IIo
|
||||
DUe42wISbCrSoxt9wnHT8icYxTpgztZ00Swk71KG2yvAqD3UHXMi1ZXp9nVgum0J3mfrZ3/ij5cu
|
||||
a4r+ajo1OlXhgYoPsUnY9A4u4LOwI5YfrULaIA7gP+UF2/Gz5Q1y81IhPu5tRq7TOeHm7XGH3hFu
|
||||
zNBylc/65fVEP73XfWgSnugFwHx4UmZMY5HMXbW35H0KW0LY3CWjlacUUPjM2Rbv7WTeJLKJwix4
|
||||
MK/PT8YIy1GAjSxKbJc1STeii/FWzOzMmIalbSf6E1D5aBiUuccV5bPxWObQ5IFCDmPtG1/9C+Ap
|
||||
xBFdNHNodFtDnwFp74xh6tYG/yhOiyxnETM83uqQVgGmsFoedOLOst/Nj3VykZeBPBJ87o5IvPBB
|
||||
R6iPc6ygJOl6KY5y8CthSQ7qKU9GeFlP6FaLiJideiwm7THZyldfiU2FU8f3ND1BH4wxs4Ik5fzq
|
||||
SCnK7PLFzFa2kkmTHROCOJuJE/GrMYWFbqHzECpE3ypLo+0H5wQb+eUQYl2Fblb8rQXztJ+YGmnY
|
||||
GyMvo8onlxjR3GibdDs/NSG5fj4stuZNMtzs/I5MvzwzvDGWXpMKqg4VfeRky4c24SdZusBp6nOi
|
||||
utVgzHdcmgC7/Zvt9ZODaFyNMdhXEcg3n4xx3fgtiI0rE/zBu2LyJrYHL58p2W6p7fXS2vNRx/Qr
|
||||
UZch47xbbBayVNEF+da718/LhwSLtUkJyUEyaNqkVNH2LCfaMM5FjYdwrayF5w2P1+LQjettWgMP
|
||||
Hy+azThAHEtqDPuXZTBXP22M/mpqNcIiKqnA34rBzstoBFaGS7JvFimvN7jL0XvYd+TxHDGi210v
|
||||
gHYZXfL1T2i8QJZDUb1rtjV2M+o9uaCgRm/ObO9eJcNGb2JooD+SfSd6iBXhKYL7iI+ENI1UsHu1
|
||||
GJF49TfERc+7x7OyuUMdXvbE8YlXjHftlcvr4jwz43w8GHMUjYGyRmhLdrp4Q/M8nu5QL3KZ7Zhx
|
||||
D+f6ktnyJbAroo42C7v4yddoKQVb4qmnqeg11dKBXq+Y6PP2kgxFfh7RfFF1EgXnRTjYxi1FVyw0
|
||||
7Lo33uGQvyIX9FCSqXQ7XfkcKmIJSepuqCQfaTi1XWmDU6UVM7erd9cv+25GX/9CZSQdDWY5qq58
|
||||
3xcd3SzjbT9aoGD35BI//Wy6Yb8hPtq/TIMZ/iYyxnvxpspS8rdsV+4Szu9luFCqS+0wrzY5p0Ly
|
||||
foMbsAudh8zlkruTL7/8p4vKe/BR39spRKapMvtS1QYXrnqOXMnqiFPMmldvE7VFX7/MbB6cUDdX
|
||||
V4Beazie16WaCKUcx8hDlJJ9xdJuRFcaQe1GHgvzw6GQPp9kBG0/5ExbozAcvv0Dff0S3hzqSzjS
|
||||
WqzhcM9SZnk0TViZ33N0LBMDX5iqh6uyWF1AScuWmSsIjH7dRC2KMyVnd7MRkwlvNeFPvR+6niWj
|
||||
PIc5iPOjpgu0D9GYXlcUJThYsa26eHpzhnkJOxt57HCvYo+vHJki2FVHdmgQ+9NPFJXHIl5/+/vw
|
||||
rV+lelAbizh68maXEBXlsvzByr2SvSmPRfrzr8SUtpU30v2HKpH5HPD6vROSRvQuI3y6kbGDGR86
|
||||
etmLI0xorzKSSy7n/vviwtsRL3jdK07BSBIGgLQyw7I1b8I5Nu0IzevuQba2EXbD1x9BFq8OFOJM
|
||||
SjiCVoLzPS2wNJC8YI9B0GURsw/eLPCBT1u/OSFr8ZCYq/fU4OelP8P6uDNp/u0f07LW+p+fZuq0
|
||||
lMOZryAFpmcKXXa96q2wZMfwJJ7GdqsMiin29RQyZYtorphpMV0f8Qn8g6rQ03uaOn7aJDmKxtmg
|
||||
i+OnLOpNfwmQ/BCeLDH7shtSP9JBX2HCHLV0C16IWQnRQVUZXrbCN962Do9A7JkaPvKkcYNjjZb2
|
||||
K2G7g0zC5sOQCW+puBEVL3NjNLqLCXw5C0RPGqeT9HVfb0S5xbhaZdBN/iT08OlmRtReN40pu46m
|
||||
0soeJc6sZIjKy9YEz77eiDlVFp/IPpNhsJBD+dVee715NiLYrl8WRbBtOF1q8RPVifMhh2dbdF3V
|
||||
XWPY1IpIdrtQNqpqrwnoq4ds+yodj3/zGbJiwsR96KSjZY8EWDz8noXDc1P85ielbBYWXjs3BbH1
|
||||
kFMIuvZGpZN07cYl3qhw3z8xIc+nyEd3o8pKt4IIs1g9d1MmtgKs83dEDp6mF1MWnmvI09UCi6+y
|
||||
McaXf61lej1j4kZvlozUc9frPnR2TF/3XTe961CHkvsC25/2ntfeQzuHPIMPs/S1Z8w//ehG+UY8
|
||||
x7XQ6oWOF+Xbr6moLhbep1L8SPnNk8FRF4pp92ru8Ej0gJjBZgy5q8SAYnWb0fG2vhljsL0AgBCe
|
||||
MPQHzRvzQsbwfR90/s7n03lgM7JxgZl+jpuCa7acynLl7Qlu+grROWgoWp30E3OjVi7ox8AlBHYz
|
||||
MatAez5Lbd+jeOelzJI9w5DkIrBhpRcq3vz83dePIxy2a+JsxkNBAecBfNS3RpIPq8P5eNT7nx9l
|
||||
l9PYh2MSZwLUmrSmo3J/Iy6lhwjmJ+h4Ck9bLrCj6ioyPXjMUDSSzNfTQ4LTxsNENQQtWbXyK0XG
|
||||
TmnIdrDEhBuCXsqZskPf+fsW8hcILoTcfTK3NZOE7oRXjxanS//lC1UxSGSzB1pLOV2v7TaZt42s
|
||||
I2dcnSl8+cZQBvYJTinBdO3ddyE17vcenMdjyZxTlXat2vQ5rEnFCBl9D4nsI8Xw9ed4w43Bm1I2
|
||||
vFEmNoSp0kPu+Np75OjsGYRsccCLwWmJjeR1mxLvUFfGXAVWD7cEJqbZZ8Sp8VjlPx5AdMVMu8En
|
||||
VQ5DxF1ySJfEGx9NtoCZDXdmbV9pyL/9Qw7so8m+vIHXfXZ+w0vWE4IZ2iHeaav2xz/Yb97ru5uS
|
||||
w/aCo5/fCufnwi8hzpY5sU7jPpw01K3BGe810e+71qBvrZthIz8c5pRalky4niSFRDJmrolwOHB1
|
||||
MhUVLWzacjVBvNprkiLTak3Xh9XTmw7ZbYbandds501xyPd1Z0IfZgL76cXk0uyt/HiRAehtTC9P
|
||||
KUG72iuiG7md8NnWY7CxnRGP3Eo0D9HtCb/82C1Siw9h4VowNX1Pp855hO/AEGx4tteRrnul6cbn
|
||||
IVDRb341WIXD6UOjALbMjphTpKukf8/zBTwh0LFwtv1uVBX1Am7iN3iyzwn/w3cYXQXM5O+H0T+2
|
||||
m4V8LG8GXfw3v0HiS9r/8qMYr5kQo13L9/Rd5lPCs9r04ezWOds+RCec+LP7oy/s8dU7vm8PMjrr
|
||||
O4LXq9ZBws8f2bLwZvbi6iaNsn658DleE6yM+ZvTIGpHuD9IjaUgSRE/1mMJ93NEyF6PRa8vyqUE
|
||||
P/8UEbkqaIHoHR6idPrqv56IP70vX/lEDp8HSVb0HO/BOx5j0s7CJ+l5lZ6U+KZlBH9OARp/8cRi
|
||||
uiFWbmp8vCzMCxqOlyfTb2sHjafj8Q442b2Zeo4jxN6n5R76d53Q6RpPHj/aGP/p16Q6ed5qebNP
|
||||
SNnONR61ru+o1+UXqKa7R/zb+uZNN7t9wi15m0wvZbHoMWls5A5uy8zH4oMmqnZv1D3cC13tjXcy
|
||||
dFMXozKLOCHzLS3G/HNWIVDQjs4+OAmd9u/nL/8o/eCqG9nRtuGoNxoz0kveze2nCeDL4+jYJJhP
|
||||
1VpcQBA7W/LTa5Fgs1dO52lDxe0l9AYQjzmKJnv89u97N6tNn6KY9Jx52bE2Rin285/eki0jrdFI
|
||||
PNv/4T0K1WI0hvuljpQNuuJNkV4T2g/aBWX9JaOrx+Nj9LfDPofLW61ZKGwxF31SpTB2xZnYl8o2
|
||||
hA50+ofv/PxyPcWejuoPPrBtggVvPMenk/I6nCKi48ExuBw/apSepAfbBmOI2C1atGh/yUe29RTb
|
||||
GJPNMkJfPobH4+lQfOtfhp2wk7HEn3rH+1Y3lVdd7OioTSs+1fdSB+4WT6osuV/Qe6imEEMk0fi4
|
||||
oqjf64YE0oW0mPa7Rfd85WoNNjocmVnW0PXf+toIhesy6307JFxzzxf4zrPErg+vbirF8YQ+Tq4R
|
||||
e3Ftk8lpiQtCuyyYJ0ZOMi23kCITxRe6TAfJGKmTWsqwCyV664XBG52zv4Zf/x+F9B3OGUZv5Jhe
|
||||
QDdwzfmEhukOU+bdiNfngpd+6xmSmu6JcWNDMgSPvYB6rePkxzOFN32tER9Si91e8d4Y3eiZojKb
|
||||
Uqbbz8YYD7Zm/uZ7ugoi0s3EGGL05Wn4NJAjEjflLd7ooSCTX/8clJtTglCgN3PK3b5bm2cvgova
|
||||
hORANZm/h+2h/vEZPAu4S/qiXElgBNGCdgoIqD+fribc6Ykw0/BZwj/3awAfcftgmvAIjUYVaYte
|
||||
tR3T25fXjLfQGUHNUEZUQ+47nuqbO+Q39U2s5JqFjSZrJpBMIlSxCj0cH7v7jMS0qpg3dzWa2ta7
|
||||
oMPe5sQpd33B37djrZz2L4+i/l7zH79DbMVDhrtmYczsNaWKl0fa1x+RcHZ382VTrdcrdhy3C++b
|
||||
7ydwzm1Jvjy567OHsIDJn3u68W6SNwfCTGG9my/ferKNL29UgWQCweuAcGNKuJyDlj4+uKkW12L8
|
||||
8bhQ/rhErSwjGbU8xCDYvcjcz8L3xMZKTn/y/zC/EoP2tHfl8z0vKGyqjE92XpnoN49k5yOE7BD4
|
||||
sULTBoidbO2QM/mjbwRYvXGnxsAnofTkP3pIyKHh3bENI9gyNyKO76h/+ChoB8mhtHrsCoG3AyAV
|
||||
gc2M3aSi+bb8rGF+LnRClHcWjtbT8mGuZIPW2fpkjOJ5YaGfvzXwM0u4V9k+uqZ9R6ybrYWTAlOp
|
||||
mA3SKT97Gl9dnUUOfUJjCugdGVN/Du/oFw8DP7VkVU/3Gj7g3L/z3ux99UeVR0VtiHnGiM8PacxB
|
||||
5G9CvM37xefV2CygbeITXhTbuVsNh9lVjO6yII4Xtrwb77KPct98E4+8Zo/J0vAGKxESCp80Lab9
|
||||
bUORFYY2zb79s79u+7t8OPU7OimhnYiy6zxh5ZkhXURRkPB0cQFINo8Sw5fHT3IFe2CrKTzM0lX1
|
||||
phXSfEjvocPsnTmHvTwn6Z/9F6vMc2M+Hl0K3Ku3X166RatgjFoY1eFE7G//548BdAhJlJCruqBo
|
||||
7v1NjM7h0seLfvcsxh8P+/JLDEs4ePPydcB/eJx6LYaCxqYaIzXLBUY2lmbwVJ+eCDuH4scXDXES
|
||||
pBTWt/ZK7NdxgZj6cZ/QS/mduEu4J3VQOhe4e9eKad/84k++CeTwxW1CwvDs8Qd7SsBPpUXcTXX0
|
||||
poV5pChzrhuy/5Qemnva25DPw5J923DXKOurjeaDqbHj3S87Fj9uFthzeGSuExbhSIfIgrxJjnT8
|
||||
+s/i8u5qubVuPd34xOuYs3pQ2b6aOXshY5UM2mNjw9ff09tXj2Zpv1kAGy8V2S8lDUnO2w1ka6B3
|
||||
hitPQd/9ihZerLJwlqyPiXAKBB/oMzng5fd+87k59LJ6tHfsmL+sgovynENFXzndiG0ejgaxXLD6
|
||||
SiSWvrh8/Z0bwxjgmrntZeuNXRT30D0Kg3krpzHGVN6v0eI03IjV1kPy4/MQC+AS9dznXd/rqqq0
|
||||
VtKz/VLK+GiGdQlKurwycq9aPttoNcOP70SrtuE/HoG++kGsFeR8Plzmu/KnPjtD50JQahelUVXC
|
||||
/O5dFCxJLxQIgQNzLu8L56ouBsp9LBdENdW1MWb32EZ32sN3fyYt5uapYeX4sM4/foT6XPvo8FiM
|
||||
JyxHbVzwaFYXYLnXHYao7cJfP5GXQWp8eVvXUbYkJvzZnzk9w2R0o0uOlE3aMvvLP8ZMdbE8Iawy
|
||||
LHwWfLyF2gi6m++Jdi5KYy6JY4LQWy5dCnXK650VjX/mFe9I56T1vcPzz35cnt/FcHRIUv+J9zY5
|
||||
vZKRSM/xx0cof+7yZGp3kotUX0uZWx/GcFxv6xpoLeRUwob3X/zxGT3Y16/X4fjdDwZRrjE5FLbB
|
||||
uSwN5R8+40caNaj5uZYQjaPBVPu5Dod88emBreISN9M+//JXw0eW1D6IdwkUNHF1stAkuBrD1aPq
|
||||
ZrYkFkjP2MeT0b359DTPOWotdUmSbmGH7G5ne8V4B0uil4IbzptNL0GTNy0zv/2YO2i04c62DbO0
|
||||
XZfwNO0xshE5MqtXIs5KyyzhO998+UfTTTNc3kpcWA7B5hWS4XKefXT08ImmG5Oh+hcvnGzfuKFd
|
||||
5LGh62UoRnTB7JyLyVx2NxWaDivM3izFZG4QWsjOCp2oEn6owQPsPeVv/TMyoDQc1UVh/3gP+eq1
|
||||
0QtMjZUwa1y2P7zaonfOvgwxoZztN5WGRHNvrWGt0iv58bhJe2zc334cXWzONe/FYLxDe81D7DeZ
|
||||
bExhuaNggBCQw72SjUl4Z5ZyH/dH9uOhHZbUCHYQpMwr8CIpx+pYKn//TgX857/++ut//U4YlPXj
|
||||
+fkeDBie0/Dv/z4q8O/4Ef9bEKR/M+nPSQTax+nz73/+6xDC301Xl83wv4f6/az6v//5S5H/HDf4
|
||||
e6iH+PP/Xv/X99/+81//BwAA//8DALUBqRrjIAAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c0b9868dedafa62-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 01 May 2023 22:52:31 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '196'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '2999'
|
||||
x-ratelimit-reset-requests:
|
||||
- 20ms
|
||||
x-request-id:
|
||||
- 0dc16b9c27373738c2c364d171678493
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"input": [[53380, 8245]], "model": "text-embedding-ada-002", "encoding_format":
|
||||
"base64"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '90'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/embeddings
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciCjVZdwjI2SoEj51OR0QREDlWAbWzv3sH/zu7u2+e
|
||||
RCB5lJo15viNWf/xr7/++ruK8+ej//ufv/7+ZF3/9/+YryX3/v73P3/9z3/99ddff/3H7+//9+Sz
|
||||
jJ9Jkn3T3+O/m9k3eY5///OX8N9X/u9D//z1N7ldGbNR1kfT59Co6Mw2K/aiVIi5YisGtOPUMVc/
|
||||
L9re624DJJoVM7PigS5k25cPVXN7MDsSyvbbeMOAFt0tZSRIHC4+lqELC7w4EyLkQtT7lXUGOVv4
|
||||
xP6aWj52jb5AVWAsmZY3H0Sr5pvJ3tn4sEc2Bfm0J7cMHo67IOq27vK+rnMT1MzcMUfRtvqg6h8N
|
||||
neTdjm3dzTmezPutAtE+a8zpTl0+tXmwQGccXom7e9hxzz/GhBJtFxP7GR/ygdlYle8eUunickmi
|
||||
US+7DkY/14n9El2PVpWkwUtRLlR+oi8atsc7hr47fYjrGEU+7dKwBOXerPD4dMN4bJMaQ4iMB9mb
|
||||
m1gf40wI0CpbypSHFxSxaJpMFGYhJkTCu0jAbizA/dgTsn1POGdT4z7lNQ1Vtg9jFQ1ws314R+iM
|
||||
uTUE8/pkmpwddJPpWvlup6phKZhH+0FcY0v4dE2eMvpe7IxoFwYxvSZnGazE3ODNEK50Zp8WGjpM
|
||||
tkp2+15DvEpQAXW9jZkG6UYf3ru6QivPD/ellBr5NGy3AuyJR+jXMuS2t95+Awd4Jsw5Xl95Hwbd
|
||||
E+KbOhDte3t443FRl0jp+RsPxKxaOhYhheubMyqHkhEJweK2kXUVUmYa/a4dJyTL0ETai5DqHSD+
|
||||
3ZtX2DzXIj6v73c+JVluAnMblxk1nWJaDFYJJ1LdmOYPe1R56duACR0CYl2FQqeNaptw2eYRMQPm
|
||||
eP3tET6gv9CG+dpRi1dppT7A/cCFHCIvb/ssUUxwk8/AtmdZ4s1d+GwA1+Gb7EdRjrg8ltqmzuQD
|
||||
wevzHo2raAuKAlHI3FAq4u69rBr0FT4dHnl94PxjdqUUjvsvs7SQ5DxwVxu0DvYbgje3jLPLUung
|
||||
IUU+cfinQM3xbT/lOKlbtr8GKh+Cbz7B5p0hsideFY+Lz2GA18dzsNBFEh9Ph/1Rvt+vI9ubG+SN
|
||||
py96wuNSMaa3LEV0HW4GWJ7vBXFegeDxvD4WcAl0g+gtU9E0NFEF922ZM+sqGLpw3SkplKtsR0hr
|
||||
brzWcqJwabx9YNawafVRJ4tSlkf0wTA9gmgKe/cI3mPPiLVqRL1TFqSAOy0NujwsIZqu1DeQpNgx
|
||||
c8rUb4fruaGwWBFGl1k3oMnU8xT1eBMTs8F2PqRPHKCPVn6Y+0jktutWzhXOn+LK7Msj493KIRNc
|
||||
hQdnvtMeeZ/trk+IPm+baFKwzId8lYToGB835IyzT9SwvU7RVf4qxDT6Tz6tjyyA62qpsX3XeS23
|
||||
nChQxhe2cO+Xb70+HXZH8PNWwZNK3Zhd3UxFn1oumJExveW7zgvRtlXfzPhKYTRsW/sMZ+E90pW1
|
||||
0bjItxlF5095pfLJtHU+7y/42IwRw1/e42F5OjcgnO4ZbSrrHo/nnf6EQBGfeFiIIR9MbROgJk1q
|
||||
slOrJh8DfnyApDtXKms+4z0JvALlW+dIPC++tUMtpCkI09Wg/C2/9eElr46gYW+PJ2uRtVO0LFXI
|
||||
/exKM3v34GyuX1gsjQLTy0WJu82zCmCR7XSCfdXzhn5bYPCmbUpX59eej6iRRDAcGfDiIUhtt0mf
|
||||
A0Juz9m2GpOIBmWVwZCWDtlnuzQfF/ryjnAgIaY9NIcPr+pWwS29Ueb6XEYsV90BZVp+Yq7mE0S1
|
||||
10gRvS8LZjfJSp8KczVA0Dp3grvnLZ5ulWfDnRYG0ZWTnHfiDQAVOpTMO6EmZ4m6TNGvHsz1XUYs
|
||||
pdCgyNVXmN3Duh2wRAtA7aaiIB0/Oj0HYMPwuK+IJpgD4mW9esDyYZjsmLG8HdtLGkKgr2MMpn5t
|
||||
xzJ8GMgf6hfzkjDxBmEfYpg/01zBaV7fuF0isxovTE9I4E3L1klhlaUhUW1pp493Kroo4EqNX30U
|
||||
o+5rtnd4UKDEw+2kj62JTEi8RUB52fke1z/1A71Xpkds32/y7iahAlVFGLJ9/Yh03l6jFH56uniS
|
||||
UJ/S1beCp4QxFazs6fGuWWOE5PsNL16NO/+eKgD11j/xNymZ3n3y8AHlVz0yw8qe+mSVcIU4aVvi
|
||||
qcY3pqbeZnK08X22FyWid3EvdZB98I3tGrTJ668f2UjvF1ti9QHmfBHVRyBJoeH+cPX1FV4+j+iz
|
||||
uZ7I+fY+elz/vgB9CqIxXVvsou7LFAzj6RRieLnHaBTYqP72G/Of1qsdy9r1kYmeMZ7ls6XW8drB
|
||||
oR5vVHqv4pYdO8mWvH14ZqYXdmioE/kK1/fI6DrnUzusrFsg2zFdz/p11YfTQxvkJn3VFJ1Q0/LI
|
||||
SQPIHleXaIj6fJA/moEGJ3DwtOdZPqDX1lXCJbsQkrNPTvfkkKIa0RcxxaDTh8RzOoDUd9lW6Ew0
|
||||
ruqNAZ0aJMSmuzHm46BfYdtlAzF0/uX9dHyVUrnFBc7rA8/H5CzfwXkqI/Eno8trIiVXpFaEEGsn
|
||||
t5wNn4uNziE6EsLTZVzb7bBBK0k8UGm01jnjhZNBv9RqvGg76jVHqTeRVxRnZl6OQzRY33UDy1Z/
|
||||
4Y15cVv6tc4VqjhpmWc/cT6cl3EFxeqr4+ztb9rxfbgfkbdmGtk367alk5U3EIXXB934vpsPheqU
|
||||
yHiahBjudM65p8gYVT6EFJDURZ3TNQCU2Wu2N8ftXE/CHbbSUsZrU0lbdqpoJ5dVllPhK4Vxj89+
|
||||
hrZvp6PA1jbns/+BJlW3xAlqJxqWt7cJOC+WeOEqojct1t4d1Bt7/tH7qVdTgM2VdlSe/dv49WMb
|
||||
9B62dBLhzbueqU9Y7y2PkfX23Q58cH2lDByF6D0U+a/e0T3fq4wU0rGd0sTTYMdEmTn9K47YLj6b
|
||||
8mN/PJD7YyeiuloVsLkl0ZauBTvPh1d1qFB2phVT0UuN+HePr+hCM4E5/GPwLhGiFOonVYjxsr7R
|
||||
oLdvX7m+jxad3v4mn/tHAGa3aDCkeyeiZbGnyLmKXzzaEfM+psI7cEwnZ9aGJPm0XVwCOPjjmu0l
|
||||
M+TjSjsOKH6LSwxJ9eZjZd59cBfTB1eWlHjTJU7vSno6XJiviwKf0lxfoHOzj+b9zHReX+QKXbbv
|
||||
CI+hjdEgHx8bqEVVISTJNI9P6qaBfXgAph/2fitW1agpg15+mUPliI+R0RRyAsaT2fmzjH96qFhv
|
||||
JtBi7dF41LpPhdh74RKzrd9e9+snd7btiJZ9R31Q77ErF6410PuVFjE/ZZ8JnWRrR1SqFdEwaHsT
|
||||
tEezoAthLFu+aX0fihRsYteRGfVDkcsg5HKEkS1O0RTTboH4JzwSp7++o2nYOiJszG2Fryx1+MwD
|
||||
qryVFJnsrsc9GhVNlSH+pm8Ky3fV8gPFE5rvM9WvUNRl24sP6pT3mFlqn3f4vQXw72uKFztbjBls
|
||||
iAiyaG6ouOwrbzpmIvzhGesyMn3qzHqD1q+BMM31ypiPplSg5pAGLBRhy0U7MGf9ubt4LfcuYuHh
|
||||
0iHZ3wlMpc4zoq8gDSAdhoY46/1bH/PmIsA9JypdJvXD49HDA7RNO4Ml9qLg43HxLiEZhy27D1M7
|
||||
r8cdg9FvK6I7ytHjr10mgOxVPjvoktUO8/5Avief6JqtRH26GlMA8/rhNM94PK4t20TblBrMmrQb
|
||||
mkZkC+j9JJyWSFvrU4JIB7MeES0jfTyM4RT+4QkhfrUehRt6gG29C+JH9oD6IqvucBztGp/UoORs
|
||||
We5UkGvrzFSFL3R68BjIR1H08MB97nWF6abyn/tXb+v14qevULuWRDqeZQl1zDZVGFnL2N5CjHcy
|
||||
2vigLaIY97pTIOavxxDiznkQLwkVr4eUPtDiMnyIPrYyZ61Fz2iuJ7wUvKBdCUxSYeYb4kSlEFXF
|
||||
lWK0l+yImZ/8ko/CzZWlLXljTNd0bMf29aKotY4q274n2vawfpSA3oXHduYKe9PJuN3hoqgfsosP
|
||||
G0TjiyhCPa4bog+OHXNlCO7KOJ0FukFu3fJFtwcYz+s3HqtGj1Z3KtoAKXbxcB+qeHgv0wp++rmQ
|
||||
F67O0/NVhZkPiHU/i+10U5mIlkqXE3xwJU4TZHUw+x+m7z9DNLyvuyNcveUCS+v92xu87FLCay3u
|
||||
mf1WJX2qfWeBYtyF5CUPa94eywH/eItyrXznvEfFXdHFISBmLxb8j16t72bPvMTTvb4BPEFcqiEj
|
||||
lbPXB93Yb2C9DV7E21dKTNXYvMJrv3KIZ3V5PChYEJGTezs8xHanD4/vZoK5n7AQ0Y53wbedUPHd
|
||||
hcSr4IGmMepT6TB9KmamNxsNK7HT0MwvONtuJH36UmmBzhfjyw6xgtt0M+oLcKWdPvNTihhlWQaf
|
||||
w3uBefqq2mFd1BiwHy7xr3/R8JG4SPUZ0HhfJdEYxUGn5N5rZFqSaFFHj2QDi7QUCDa91pv5vkCx
|
||||
my3p9A01T5z7pVRZwYAX21vqMVs5nEEZHiVxQcB8rK+KLOtrssECWbUx3ZHPFcrjKZj7o5rz/LyR
|
||||
YfX19sStVDUW0iygytspNsw4NF48rWwjBbeMl1h4ZKq+6hoPUBU/dBYFKxuJx0xcQHnXJuIWV8Ob
|
||||
uuSq/eGR4RrG3rRb2xTabfUkqu4t9dqd7p1sF28Ty/SocPZGlzvk2dn58VPbVOjYId0LFGbNfqqL
|
||||
+5EqdkVvVJQ2VTw92V4ABy0jLLyPnjfRfBSVU7m4MYtGeTsZ3fEB5LR/sO1Hs7xBSwFAq6uUPuZ+
|
||||
xW3ldoSzkI905X0lnSc0miDvqhLLkbqIG6HgBXwfokGs+ObF44UmIdD87M1+q+SjVOIMITf0qGiS
|
||||
qz6GkhiC8bkS4pNUaKnfrw2Y9ZfM68+76HJ5wHUnCmy3aTNv+JrtFe2dV0/FXiwQcx8XAYXIfBCr
|
||||
eupoBKwN6JneVeIYdInYKlphmPsTc78n5lGWbDY/nmZG0mHU5Y+hgdm/MVP5PvP+Xq18+WC7wIyq
|
||||
0FvuHdMrcD7tmFWZ6o8PMjk3RZE5XKnayTc+AWgb+8Cs+NZGzVuNFz++xdJoXdpJ1d4DDHrxJVsv
|
||||
uHtzPWRw7/OECvg+ofGzMky4VOuQWR5r8zrIvgGUvfdk7uxfum1rH2HWbzyd9w4SnuY6AB2kO2nZ
|
||||
vfFYQqMBdpva+cOn/CtnFeTZ0cErmMAbGssPkb32XdwImo6GsxRSCBThySKzR3ldq7dA5hIizFPq
|
||||
MZ5A359hcZk+7Od/uvNheYePVnyocH+I8fy+RaRt3MPMRx+PTY32BMsIG+I3Wq1zVtcF/NZHlvyp
|
||||
ZU8vn2BnZwhPy7PjibW/BVQ+/R3ZSuKprV/UuUJ9NzWirraR1/OPP6BfHrXzvpLHn5KqobBEzpy3
|
||||
xREdkS2i84YZdPxolj7u9VRUbk2YU7ToSm9wlav9y6+Yx+6NzkC8HaHSrYR23+Sds09ypzAs9JA2
|
||||
J7PSR8tONXhmpzWun+YuHtTRCeDZ4Y4RxLt8+L3v1fO8+NWP3rBkIwNnxci0tR7zYWdLGFL/ZTPC
|
||||
Rkfng2gclc+JHX5+PG+QmGVKW3B31vMgHl/JA8OSX01GQInbMamfBvQC9an02R29YTHtO/SrD4YD
|
||||
0xuwVJZAklLDq8Vmlw8ftDTATVwbr+tqatvK2j/hdnieyH4NOeLSavNQ5u9P22ql8dk/iIrImi2Z
|
||||
81jeXy7MhmMt1n/8MLfu9hMu99ObSqHx5vyzWp7Rn3obxE00ys7rgcwOGmZMqRJToeAlzPxOl0Zs
|
||||
xqu9Xglw4+aEJzk6RjRPWhnoUbgztZxaTuNd7KJfPnj5xpjzilaDfJXdEx6er2fLr0G2gPxVG8x4
|
||||
1HedG68qQ3O+wfZe7utsznfQzL/M3Qq+PnT70gRnJTjMCY0tGossvUK/VGumjo3aTrOeIxdXlz96
|
||||
9nu/cmryEiN2d/X1S1AKFAuZzozVsEIcRfYTwtfXIW7mqfHgpbUJb7Q/EUtR8pifvXUDki3IVNlS
|
||||
P5aukpPCz496hXtAvCmSK/z0wi0Giw9OlwEcNgeXWUlJ9EkdIQXxEsWM7D4rb/b7GHnKtSCE3I9o
|
||||
2rziTPrlu1a4nPO7c9P9yRdMk2w8pmhgAz5/E2aYapvToEwzFBurhM37PRrO1SeVnsampxs1a9Dg
|
||||
KoEL+en8xYvPq8mHOpmuyp4/RzbnLy3DxtEA7AdLpr+EdcS7ZukjZJQ1Md/COxqtcRTQuiqedPFe
|
||||
02hkt3UlN3jlUUrBbMtHVahgN7giZv/cesLsH1G7fmKmwqHNR9tICtSk2nb+fnL8vXxvBhikfLKZ
|
||||
j1H3W785r6Sw6ireA3YH6ee3z8KKt1xcY/PHl2wH00MfJP3bIWNJO2Y+o6XXB8aWQi4qBtlTq/DG
|
||||
OLRMAH4U8UCdRdxvnccEiQcBe34sk08yIRg+rDTpMn5co6EfREAG36h0uHpvj4XVToT5M9tZm+yX
|
||||
dzUgJa8VnlhXx9ORdzbMfoGYsPP1OX8Mf3nIH32lP17xtlKIr29V8liirlPwQc3J67B8xMNhuqpw
|
||||
uj4rPG5iRR82a0NGO219w4PSFfog3oQFyk1BJPsvITHHJyeA2Z/RrlgK8bRITwHsk1bFt9CL+QrS
|
||||
8oEKp7Lx5bX95tw7VncpfH0ctrWc1BPn+lB+ftDbvLR8/eONHx9aO9lDnX4X1D952dZfpPr4zkUN
|
||||
YJ/vmKHHhjfUQpWhJcMTMXgeeoMA3QIam27xuhC/7Zw/qvBw7AUzlp9VO/7y9tl/4XSJxfaXl8BH
|
||||
3JjsNb/vds+pgHJkX8iuoCuPXpPnBhG09Sma+0198z4hfC9uRox4jdH67fsNgKRdiSO6fjwx8ZT9
|
||||
/B1u2d31KH4rGH58uqVrE/HooS8A27uObDdHXec381JCsUQpXTyEW56el3ED54v5ZWQZV3wYfemI
|
||||
JlX4ULR/5zFr2baBaeWaDHdPKR77/XGD6p3MmNdOez7MeTpKcrbB1zoqo498NxpIxYdPwuW59qbf
|
||||
PISJnx1RM1VFYvn0B/C2KGT6ejTi8WHu8M8fUtgcgmhM6rMB67VBmOebDRqQHWvQd5cPXlpq3xY7
|
||||
LNuArcbA35Nv8U7ajhjCkXz3Y9XkcbNIkwAitvqQbfxxOd3Y/V2+YR5gYdaDPD1f5/V878g+uZN8
|
||||
Yi92R7Mek33Xta3wke0CurcsU6nqh5gvU9+UdHTz8E1gEZ8MZVsBeZkHvMRBqU/T8zAphb4oiZdf
|
||||
zXy6uNxHRSgtmeVPaVvN8zY49bt8zm/vfDq69+PPz/+ZH7GZn8EZK5GOikm9eptqG8iMbUGXPH3F
|
||||
/aotS1iQ24LC4sn59FpyDVxjzBieWuBjJ0su2Jpcs5+/44cuCJSZj8g8r2rpiFRRnvMndpFPX53R
|
||||
5+oM83yGuYyd417r+gZNqvghhlB+9cnv1AIZq/Wa4PboRvzS3zVY7ssTs1bNWW8gmjKQR+lDLNti
|
||||
+Z/5BGflyHb8tPyT78MCw5kO3ZLk5W0xBsrn1B+IaomDN5DHOoWX7XyxpIPQsrm/ovG8etPJOJd5
|
||||
1e2pCdMAnC610yMf7vn+AY9H1ODpkG3mPHIwlHVcwG//6axf5U9l5kc8Zd/Df+UJzlX4Mlcm32h8
|
||||
UeeOnoS4mC+GyBO3LSpg3LwWtFfGnI9NnrkQ8HRL/PvViadyV9lgriObbKPcigRfl1UQ7aNGdqja
|
||||
6/1vXvCM7s953lujztSGUDGu154d9knC26/w7v7kkzMPRTMPCiAZ9Zfs/fLtDfHFK9C1rQqizvwy
|
||||
0qvjgqYd3wTjTxiPhz7O0C2NKW1+vCMTywc/rxX2mz90Uqtryno955XDouEUDXUJ5JVLlItnDw3u
|
||||
CUQUOcWe4Xl/T2GvnZHqh3/y1qjs1QqUMvAUKvcu8djv+87+gWCHW9GUrlgjK8H5Suf5V8yPu5MI
|
||||
VmJssKQ4yKPe+3FEqtirDL9CEwnrPTLR1rkbVFk9xXyQdEZRHolXzKWFrQvCPvShacX+N++Ixpf/
|
||||
1UCBQ8hILi7z7rlTB3mJtwqz1jvajl10EgAHz4j8eORPnrclOaZLt6p5mSf5RgaiH3Gd0G0+zjyA
|
||||
+mZ1xlKEh3zgjROCWfEL2w9HC63G0axgKFlEIcidfHTIVoS/f6cC/vNff/31v34nDMoqeX7mgwH9
|
||||
c+z//d9HBf59T+7/FgTx30z8cxKBdvf0+fc//3UI4e+6rcq6/999VTy/3d///CX+OW3wd1/198//
|
||||
c/lf8//6z3/9HwAAAP//AwBim/ij4SAAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c0b986ab8dbfa62-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 01 May 2023 22:52:32 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '195'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '2999'
|
||||
x-ratelimit-reset-requests:
|
||||
- 20ms
|
||||
x-request-id:
|
||||
- 208e3672e7991aa80472beb0310e43d9
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -20,7 +20,7 @@ interactions:
 on track.\n"}, {"role": "user", "content": "Task: ''Write a wikipedia style
 article about the project: https://github.com/significant-gravitas/Auto-GPT''\nRespond
 only with the output in the exact format specified in the system prompt, with
-no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}'
+no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": 0}'
 headers:
 Accept:
 - '*/*'
@@ -90,4 +90,101 @@ interactions:
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour
|
||||
task is to devise up to 5 highly effective goals and an appropriate role-based
|
||||
name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned
|
||||
with the successful completion of its assigned task.\n\nThe user will provide
|
||||
the task, you will provide only the output in the exact format specified below
|
||||
with no explanation or conversation.\n\nExample input:\nHelp me with marketing
|
||||
my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital
|
||||
marketer AI that assists Solopreneurs in growing their businesses by providing
|
||||
world-class expertise in solving marketing problems for SaaS, content products,
|
||||
agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization,
|
||||
planning, and supporting execution to address your marketing needs as your virtual
|
||||
Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice
|
||||
to help you make informed decisions without the use of platitudes or overly
|
||||
wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective
|
||||
campaigns that maximize results with minimal time and budget investment.\n\n-
|
||||
Proactively take the lead in guiding you and offering suggestions when faced
|
||||
with unclear information or uncertainty to ensure your marketing strategy remains
|
||||
on track.\n"}, {"role": "user", "content": "Task: ''Write a wikipedia style
|
||||
article about the project: https://github.com/significant-gravitas/Auto-GPT''\nRespond
|
||||
only with the output in the exact format specified in the system prompt, with
|
||||
no explanation or conversation."}], "temperature": 0.0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '1669'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SSy24bMQxF9/0KQptuZow4TtJktgn62BQBWqAtmqKQJXqGjYaciFRcI8i/FyM7
|
||||
jy4lkPceXvLBUXSdC4O3ME6pfXd16y9/XHxa3tF3n65/0DmefYwnJ18urlFc42T9B4MdOhZBximh
|
||||
kbBrXMjoDaPrlmfnq9Pl8uz8pHGjREyuc/1k7Wpx2lrJa2mPVkdL17iivkfXPbgpyzjZb5NbZHXd
|
||||
8cWqcS/az//L1XHjTMyn56+T49PHxoVBKKC67ueDG1GfZLMkdJ3zqqTm2WZIYUOeB/jsR+zgG93S
|
||||
h+uvN3yFGjJNs18HnsEXE5ZRioLvkQ0iKvWMEUxgrwjEYANCHZyEQTZVb8JIvlXbJQSfjUJChfUO
|
||||
piz3FIl72EpOsQ3JqwL+nTAbKc5yGRV9DkMD20xG3DfgOcJG8uhtfi9u+IP4pN0Nt3ApHEswsEGy
|
||||
lH54bgfZk/V0jwwmE4UZG1lLRvAhlOzDrkofYkZG1XkA4r0ZCS9mj2+ZDGGgfmjvik9kOzhkCDZ4
|
||||
A1JA7n1fWZ+b73EPvsWUWrVcgpWMsYGNpCTbOYPKVyhiIkat1fOSos9R/0uyYryvurXpEOk8huQa
|
||||
p8lL9VuFQ/IcX+nPbCGVWj5lmTBDIKtj7r0zbjAjB9TFPtqU/FqyN4Qt2VCdi2J+lWOd/zXRiGg6
|
||||
/1Cuaw3/G9wVyjgi25MFG3GRomkHCX3mWkfjfCcIZPp0BIf+w271llLSmWN/UFgZ1qgGk6jSOiEo
|
||||
5nsKOBc9gS/cY+M2xKTD74xehV3n1GRyjSOO+Nd1R4+/Ht/8AwAA//8DAP+8R3oSBAAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3e271a39dbcef9-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 08 May 2023 02:08:13 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '8895'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89605'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 262ms
|
||||
x-request-id:
|
||||
- 008e44284ea375a3f964c394b25fb76e
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -19,7 +19,7 @@ interactions:
 with unclear information or uncertainty to ensure your marketing strategy remains
 on track.\n"}, {"role": "user", "content": "Task: ''T&GF\u00a3OIBECC()!*''\nRespond
 only with the output in the exact format specified in the system prompt, with
-no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}'
+no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": 0}'
 headers:
 Accept:
 - '*/*'
@@ -84,4 +84,95 @@ interactions:
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour
|
||||
task is to devise up to 5 highly effective goals and an appropriate role-based
|
||||
name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned
|
||||
with the successful completion of its assigned task.\n\nThe user will provide
|
||||
the task, you will provide only the output in the exact format specified below
|
||||
with no explanation or conversation.\n\nExample input:\nHelp me with marketing
|
||||
my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital
|
||||
marketer AI that assists Solopreneurs in growing their businesses by providing
|
||||
world-class expertise in solving marketing problems for SaaS, content products,
|
||||
agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization,
|
||||
planning, and supporting execution to address your marketing needs as your virtual
|
||||
Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice
|
||||
to help you make informed decisions without the use of platitudes or overly
|
||||
wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective
|
||||
campaigns that maximize results with minimal time and budget investment.\n\n-
|
||||
Proactively take the lead in guiding you and offering suggestions when faced
|
||||
with unclear information or uncertainty to ensure your marketing strategy remains
|
||||
on track.\n"}, {"role": "user", "content": "Task: ''T&GF\u00a3OIBECC()!*''\nRespond
|
||||
only with the output in the exact format specified in the system prompt, with
|
||||
no explanation or conversation."}], "temperature": 0.0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '1590'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA0zOS0tDMRCG4b2/YpiNm7ScWnohW0UoIgoKiiJlmoxtbE4mJHNaS+l/l3rffsM8
|
||||
vHsMHi26Falrc+xNLtb0dHN+xU19GE+3eXSXr+/j4+WjjOkJDcrijZ1+f/SdtDmyBklo0BUmZY92
|
||||
MJ4OR4PBpJkYbMVzRIvLrL1hf9TTriyk1wybARrsKi0Z7R5zkTbrXGXNqaI9mzYG/+x/u0EVpfi7
|
||||
DJvpwaBbSXBc0T7vseX6oxaJjBap1lCVkh4bJSmnY//stIUqpewMzMBRSqKQi2yCZ6AE0mnuFLZB
|
||||
V9IpELjIVICShy55LkfQ0yIyKNV1H24jU+U/ATYUg/88wqsUaBlU4CsFdtJ9yn08GHwNKdTVvDBV
|
||||
SWixqmQ0GJLnd7TN4eVw8gEAAP//AwDo3pkcpQEAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3e27aaaa53965d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 08 May 2023 02:08:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '2445'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89626'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 248ms
|
||||
x-request-id:
|
||||
- 8868ee7b699bc67e6988580bb70aa31f
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -20,7 +20,7 @@ interactions:
 on track.\n"}, {"role": "user", "content": "Task: ''Help me create a rock opera
 about cybernetic giraffes''\nRespond only with the output in the exact format
 specified in the system prompt, with no explanation or conversation.\n"}], "temperature":
-0.0, "max_tokens": null}'
+0.0, "max_tokens": 0}'
 headers:
 Accept:
 - '*/*'
@@ -94,4 +94,104 @@ interactions:
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour
|
||||
task is to devise up to 5 highly effective goals and an appropriate role-based
|
||||
name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned
|
||||
with the successful completion of its assigned task.\n\nThe user will provide
|
||||
the task, you will provide only the output in the exact format specified below
|
||||
with no explanation or conversation.\n\nExample input:\nHelp me with marketing
|
||||
my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital
|
||||
marketer AI that assists Solopreneurs in growing their businesses by providing
|
||||
world-class expertise in solving marketing problems for SaaS, content products,
|
||||
agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization,
|
||||
planning, and supporting execution to address your marketing needs as your virtual
|
||||
Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice
|
||||
to help you make informed decisions without the use of platitudes or overly
|
||||
wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective
|
||||
campaigns that maximize results with minimal time and budget investment.\n\n-
|
||||
Proactively take the lead in guiding you and offering suggestions when faced
|
||||
with unclear information or uncertainty to ensure your marketing strategy remains
|
||||
on track.\n"}, {"role": "user", "content": "Task: ''Help me create a rock opera
|
||||
about cybernetic giraffes''\nRespond only with the output in the exact format
|
||||
specified in the system prompt, with no explanation or conversation."}], "temperature":
|
||||
0.0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '1623'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA3SUQW/cRgyF7/0VxJy1C28dx45ubhIkBYrESAPkUBcBd0RJ7I6G0yFH8cbwfw9G
|
||||
Wsc+tNcRh/zeexzdO+5c6/yI5qcUNpdvDvjHzQd+e0jH97J7Pb+9+m3+9OX67tXF7F3jZP8PeTvd
|
||||
2HqZUiBjia5xPhMada7dvbw6v9jtXr46b9wkHQXXuiHZ5nx7sbGS97I5Oz/bucYVxYFce+9SlinZ
|
||||
V5MDRXXtr1cvGvfU++f57uqycSaG4efRi8vdQ+P8KOxJXfvXvZtIH9tmCeRah6qshtEqpESjWAV8
|
||||
wIla+CT+8DG9u/l8G9+Q+sypTmwBI1z/Doskngm8RC2h9gAb0UATecbA30mBI4wUEscBMBurKWDs
|
||||
YCrKnjEq7HP9ZiNxhhL530Iws7JEBRMI3NP2iQNY1+49ewzhCB0pD5G6WrsqqRNtpBOdRJAesvgD
|
||||
SKKM2kDKMnNXh9JdomwwFO4welrAtKQkuQrJUoZRii3dKBpnepKcsnhS3d7Gd4JB29u4gdcSAu4l
|
||||
oxF8YxvhKKVydTRTkAQINTUKoc5Wk3wMHGm1zGOykknXYapUeaQHf9xTjmTsYeCMfU+rf5YxakCr
|
||||
DlfJVf4zmdBLntC2Fev6f22ZaJKM+0DgR8zojbI24NH8eASVOGizDKM44FChO8YgQzkxf+MQ4ECU
|
||||
AEvHlVgXHTzXTYc+ywRqWM0U6DmyjgvQzRIAQU/U7dEfTr4PA6ktucuKKjNlDAHUcvHVnaUwoa8o
|
||||
0i81zySbAEVdysDLSFpzqjd4SuitL2ENfCGtBi0NHtEXsvcU0pJaxJmHmmMtMfJjrPsGWHfPtA5P
|
||||
WbqykDz3vQGOPpRuTRgHOi1oA4GH0TgOq6Mqpfra97VdU9HX/wMgWC7hCDxNlBcFT8wL4p+n/ayU
|
||||
HCvGJLZg1FeF+UB2ek//7c2EdzyVqbYVffTU2B/IQDGQbt1D49a0vmZClehapybJNY5jR3euPXv4
|
||||
++GXHwAAAP//AwC/sejJHAUAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3e2752aadc2524-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 08 May 2023 02:08:26 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '13467'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89617'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 254ms
|
||||
x-request-id:
|
||||
- 76a759df6bbe7d9504acd9e00bdb0f24
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
0 tests/integration/challenges/__init__.py Normal file
17 tests/integration/challenges/conftest.py Normal file
@@ -0,0 +1,17 @@
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--level", action="store", default=None, type=int, help="Specify test level"
    )


def pytest_configure(config):
    config.option.level = config.getoption("--level")


@pytest.fixture
def user_selected_level(request) -> int:
    ## used for challenges in the goal oriented tests
    return request.config.option.level
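The conftest above only registers a --level option and exposes it through the user_selected_level fixture; neither challenge test shown in this diff consumes it directly. As a rough illustration only (the test name and threshold below are hypothetical, not part of the repository), a challenge test could use the fixture to skip levels the user has not opted into:

import pytest


# Hypothetical example, not in this diff: gate a challenge on the value passed
# via `pytest --level=N`, skipping when no level (or a lower one) was given.
def test_challenge_level_two(user_selected_level: int) -> None:
    if user_selected_level is None or user_selected_level < 2:
        pytest.skip("Run with `--level 2` (or higher) to enable this challenge")
    assert True  # placeholder for the real challenge body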
@@ -0,0 +1,46 @@
import contextlib
from functools import wraps
from typing import Generator

import pytest

from autogpt.commands.file_operations import read_file, write_to_file
from tests.integration.agent_utils import run_interaction_loop
from tests.integration.challenges.utils import run_multiple_times
from tests.utils import requires_api_key


def input_generator(input_sequence: list) -> Generator[str, None, None]:
    """
    Creates a generator that yields input strings from the given sequence.

    :param input_sequence: A list of input strings.
    :return: A generator that yields input strings.
    """
    yield from input_sequence


# @pytest.skip("Nobody beat this challenge yet")
@pytest.mark.skip("This challenge hasn't been beaten yet.")
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
@run_multiple_times(3)
def test_information_retrieval_challenge_a(
    get_company_revenue_agent, monkeypatch, patched_api_requestor
) -> None:
    """
    Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.

    :param get_company_revenue_agent: The agent to test.
    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    """
    input_sequence = ["s", "s", "s", "s", "s", "EXIT"]
    gen = input_generator(input_sequence)
    monkeypatch.setattr("builtins.input", lambda _: next(gen))

    with contextlib.suppress(SystemExit):
        run_interaction_loop(get_company_revenue_agent, None)

    file_path = str(get_company_revenue_agent.workspace.get_path("output.txt"))
    content = read_file(file_path)
    assert "81" in content, "Expected the file to contain 81"
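Both challenge tests are decorated with @run_multiple_times(3), imported from tests.integration.challenges.utils, a module whose contents are not shown in this compare view. A minimal sketch of such a decorator, assuming it simply repeats the wrapped test, could look like the following (illustrative only; the real implementation may differ):

from functools import wraps
from typing import Any, Callable


def run_multiple_times(times: int) -> Callable:
    """Illustrative sketch: run the decorated test `times` times in a row."""

    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> None:
            for _ in range(times):
                func(*args, **kwargs)

        return wrapper

    return decorator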
@@ -0,0 +1,50 @@
import contextlib
from typing import Generator

import pytest
import yaml

from autogpt.commands.file_operations import read_file
from tests.integration.agent_utils import run_interaction_loop
from tests.integration.challenges.utils import run_multiple_times
from tests.utils import requires_api_key


def input_generator(input_sequence: list) -> Generator[str, None, None]:
    """
    Creates a generator that yields input strings from the given sequence.

    :param input_sequence: A list of input strings.
    :return: A generator that yields input strings.
    """
    yield from input_sequence


@pytest.mark.skip("This challenge hasn't been beaten yet.")
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
@run_multiple_times(3)
def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> None:
    """
    Test the challenge_a function in a given agent by mocking user inputs
    and checking the output file content.

    :param kubernetes_agent: The agent to test.
    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    """
    input_sequence = ["s", "s", "s", "s", "s", "EXIT"]
    gen = input_generator(input_sequence)
    monkeypatch.setattr("builtins.input", lambda _: next(gen))

    with contextlib.suppress(SystemExit):
        run_interaction_loop(kubernetes_agent, None)

    file_path = str(kubernetes_agent.workspace.get_path("kube.yaml"))
    content = read_file(file_path)

    for word in ["apiVersion", "kind", "metadata", "spec"]:
        assert word in content, f"Expected the file to contain {word}"

    content = yaml.safe_load(content)
    for word in ["Service", "Deployment", "Pod"]:
        assert word in content["kind"], f"Expected the file to contain {word}"
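Both tests drive the agent by monkeypatching builtins.input with a scripted answer sequence and suppressing SystemExit around run_interaction_loop. The toy loop below is purely illustrative (it is not the real run_interaction_loop from tests.integration.agent_utils); it only shows why the sequence ends with "EXIT" and why SystemExit has to be suppressed:

import builtins
import contextlib


def toy_interaction_loop() -> None:
    """Illustrative stand-in for run_interaction_loop: read console answers until EXIT."""
    while True:
        answer = input("Authorise next command? ")
        if answer == "EXIT":
            raise SystemExit  # presumably why the tests wrap the call in contextlib.suppress(SystemExit)
        # each scripted "s" answer would let the agent take one more step


# Mirrors the tests: scripted answers stand in for interactive input.
answers = iter(["s", "s", "s", "s", "s", "EXIT"])
builtins.input = lambda _="": next(answers)  # what monkeypatch.setattr does in the tests
with contextlib.suppress(SystemExit):
    toy_interaction_loop()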
0 tests/integration/challenges/memory/__init__.py Normal file
File diff suppressed because it is too large
@@ -0,0 +1,779 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
|
||||
are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
|
||||
using the read_file method and follow the instructions in the file.\nYour decisions
|
||||
must always be made independently without seeking user assistance. Play to your
|
||||
strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
|
||||
Use the command read_file to read the instructions_1.txt file\n2. Follow the
|
||||
instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
|
||||
limit for short term memory. Your short term memory is short, so immediately
|
||||
save important information to files.\n2. If you are unsure how you previously
|
||||
did something or want to recall past events, thinking about similar events will
|
||||
help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
|
||||
in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
|
||||
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
|
||||
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
|
||||
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
|
||||
\"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
|
||||
\"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
|
||||
\"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
|
||||
get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
|
||||
\"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
|
||||
Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
|
||||
start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
|
||||
\"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
|
||||
\"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
|
||||
information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
|
||||
Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
|
||||
Continuously review and analyze your actions to ensure you are performing to
|
||||
the best of your abilities.\n2. Constructively self-criticize your big-picture
|
||||
behavior constantly.\n3. Reflect on past decisions and strategies to refine
|
||||
your approach.\n4. Every command has a cost, so be smart and efficient. Aim
|
||||
to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
|
||||
should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
|
||||
{\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
|
||||
\"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
|
||||
\"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
|
||||
to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
|
||||
{\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
|
||||
can be parsed by Python json.loads"}, {"role": "system", "content": "The current
|
||||
time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine
|
||||
which next command to use, and respond using the format specified above:"}],
|
||||
"temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '3303'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA6SSTW/aQBCG7/0VozkbhOskJL5VObSoUZVDc6ortKzH9pb1rLU7FqaI/15tDCSF
|
||||
ikuv8/U+78zs0JSYo26U6Lazk/nj1/ope17Q9tnzhh+Nvfn8xX57GTR/fMIE3eoXaTl0TLVrO0ti
|
||||
HGOC2pMSKjFP7+6zdJ7NH7IEW1eSxRzrTibZ9HYivV+5ySybpZhgH1RNmO+w867tZCluTRwwn2c3
|
||||
Cb7NPsXTuzRBcaLsKXT/cLtPUDfOaAqY/9hhS+E41jtLmKMKwQRRLBHSsRBHA7uCAQAKlMb1dSOh
|
||||
wBwOwUOCBonBAhcQGtfbEoIoL7DagidVGq5BGgLDQXyvI2pYplMZBCpjCcRBSUK+NUywaZSAGoti
|
||||
RtSagGmQaYHJe1VPKjg2XI/S368IRDPKxHlNnKWjc78Fw5XzrYrlfzOMZYNAEOrCuXBnFY+aE3gJ
|
||||
Y3W0uTxota3iMg6MwWvGi4In8ImV3f6mi7prRGdA2hsx2oT2eAQmegUgDr2PzUpgAaqFylnrNv+6
|
||||
Byite6+E7BYiPlWV0YZY7Pbcf+hIrY9SG2Ptf1z7YtOj0D45Pt1hnRc/x6qlkeG0+jNM5evzVx0T
|
||||
sfat/RLzCPEKcuApeI/7BCvDJjTL8fUwxyCuwwQNlzRgPtv/3H/4AwAA//8DAIeVULkpBAAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c1dedcf6c45cf11-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 04 May 2023 04:16:43 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '10374'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '86494'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 2.336s
|
||||
x-request-id:
|
||||
- 29d4429410978cbc93b26725e4712f67
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
|
||||
task is to create a concise running summary of actions and information results
|
||||
in the provided text, focusing on key and potentially important information
|
||||
to remember.\n\nYou will receive the current summary and the your latest actions.
|
||||
Combine them, adding relevant key information from the latest development in
|
||||
1st person past tense and keeping the summary concise. Retain information!!!\n\nSummary
|
||||
So Far:\n\"\"\"\nI was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing
|
||||
new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '621'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA0SOS0vDQBRG9/6K4VtPSkLow9kKBTE+FoqISJlOrsloMjPk3lhtyX+XQrXbA+dw
|
||||
DvA1DFxrxfWpy5ZXN83L08NYPa7v9893XI37r5/9nKv1bbWFRtx+kJOTMXOxTx2JjwEabiArVMMU
|
||||
i1VZLMtVvtDoY00dDJokWTmbZzIO25jlZV5AY2TbEMwBaYh9ko3ETwoMc7nQOKfPWEOi2O4fFPl8
|
||||
0nBt9I4Y5vWAnvivOcSOYGCZPYsNcjyMQSgc76/VzrI6HSsbahWitD40KtBOtZZVa1OiQPUMk8a7
|
||||
D57bzUCWY4ABS0zQ8KGmb5h8epsufgEAAP//AwBly25vSQEAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c1dee20f901cf11-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 04 May 2023 04:16:47 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '1136'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89860'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 92ms
|
||||
x-request-id:
|
||||
- 43309ab9b553cd94d6e27b7fcc7914e8
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
|
||||
are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
|
||||
using the read_file method and follow the instructions in the file.\nYour decisions
|
||||
must always be made independently without seeking user assistance. Play to your
|
||||
strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
|
||||
Use the command read_file to read the instructions_1.txt file\n2. Follow the
|
||||
instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
|
||||
limit for short term memory. Your short term memory is short, so immediately
|
||||
save important information to files.\n2. If you are unsure how you previously
|
||||
did something or want to recall past events, thinking about similar events will
|
||||
help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
|
||||
in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
|
||||
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
|
||||
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
|
||||
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
|
||||
\"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
|
||||
\"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
|
||||
\"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
|
||||
get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
|
||||
\"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
|
||||
Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
|
||||
start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
|
||||
\"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
|
||||
\"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
|
||||
information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
|
||||
Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
|
||||
Continuously review and analyze your actions to ensure you are performing to
|
||||
the best of your abilities.\n2. Constructively self-criticize your big-picture
|
||||
behavior constantly.\n3. Reflect on past decisions and strategies to refine
|
||||
your approach.\n4. Every command has a cost, so be smart and efficient. Aim
|
||||
to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
|
||||
should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
|
||||
{\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
|
||||
\"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
|
||||
\"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
|
||||
to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
|
||||
{\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
|
||||
can be parsed by Python json.loads"}, {"role": "system", "content": "The current
|
||||
time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
|
||||
reminds you of these events from your past: \nI was created and nothing new
|
||||
has happened."}, {"role": "user", "content": "Determine which next command to
|
||||
use, and respond using the format specified above:"}, {"role": "assistant",
|
||||
"content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading
|
||||
the instructions_1.txt file to determine what actions to take next.\",\n \"reasoning\":
|
||||
\"The instructions_1.txt file contains the necessary information to determine
|
||||
the next steps.\",\n \"plan\": \"- Use the read_file command to read
|
||||
the instructions_1.txt file\\n- Analyze the instructions to determine the next
|
||||
steps\",\n \"criticism\": \"I need to ensure that I am following the
|
||||
instructions accurately and efficiently.\",\n \"speak\": \"I will start
|
||||
by reading the instructions_1.txt file to determine the next steps.\"\n },\n \"command\":
|
||||
{\n \"name\": \"read_file\",\n \"args\": {\n \"filename\":
|
||||
\"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content":
|
||||
"Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe
|
||||
current task_id is 1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead
|
||||
the file instructions_2.txt using the read_file command."}, {"role": "user",
|
||||
"content": "Determine which next command to use, and respond using the format
|
||||
specified above:"}], "temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '6519'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA6SSQW/bMAyF7/sVBM9OkMR1k/q4YYdgxU4bMGweAlWmbS0yZUh0myDwfx8UJ02Q
|
||||
YoehVz3ye48UD2hKzFE3SnTb2cny05f6Z/f4w358XmXL52zh11+XVff5MUsftpige/pDWk4dU+3a
|
||||
zpIYx5ig9qSESszn96t0vkxXs2WCrSvJYo51J5N0mk2k909uMktnc0ywD6omzA/Yedd2shG3JQ6Y
|
||||
L9KHRYIX+Kswz+4SFCfKXmqzu/shQd04oylg/uuALYUz2DtLmKMKwQRRLDGmYyGOIxwKBgAoUBrX
|
||||
142EAnM4PZ4E2kl8LPBbQ2A4iO91TBQ2i6nsBCpjCSJRGQ6gwJog4KrjewDDIA1BaTxpcX4/hTWE
|
||||
xvW2hD7QUYsNm7Fau7ZVXIK4ERPlf3AKTK5jelLBseH6P7JGIJOOq/J7MFw536pYHv1LEvKtYTqV
|
||||
7QSCUBdujTurePScwPf3THTD1d6I0Sa0I3wNTHTEEIfeRx8lsAbVQuWsdS+G6yPvempQWvdeCdk9
|
||||
xBBUVUYbYrFv9hc6Utuz1Yux9t3fM9KH5Hxhp+Y3B8aqpdH44nQTTvn69jBH4Wp5EfDqenQ+BSh4
|
||||
wCHByrAJzWa8E8wxiOswQcMl7TCfDb+HD38BAAD//wMAMAMbHgkEAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c1dee28b98ecf11-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 04 May 2023 04:16:57 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '10076'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '87376'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1.748s
|
||||
x-request-id:
|
||||
- 433f36ff5841b13fb628ff5b8c49a758
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
|
||||
task is to create a concise running summary of actions and information results
|
||||
in the provided text, focusing on key and potentially important information
|
||||
to remember.\n\nYou will receive the current summary and the your latest actions.
|
||||
Combine them, adding relevant key information from the latest development in
|
||||
1st person past tense and keeping the summary concise. Retain information!!!\n\nSummary
|
||||
So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these
|
||||
events from your past: \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest
|
||||
Development:\n\"\"\"\n[{''role'': ''you'', ''content'': ''{\"command\": {\"name\":
|
||||
\"read_file\", \"args\": {\"filename\": \"instructions_1.txt\"}}}''}]\n\"\"\"\n"}],
|
||||
"temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '836'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA0SPTU8CMRRF9/6Kl7cuE8YJil36RdCVcaGJGFI6j5li29e0D9AQ/ruZiLq9Offm
|
||||
3AO6FjXa3ogNyY8ubx47fz2+e7nlh0n3PMurJ57dn2/2rzbfoEJebcjKqVFZDsmTOI6o0GYyQi3q
|
||||
+mLa1JfNtL5SGLgljxq7JKOmmoxkm1c8GjfjGhVui+kI9QFT5pBkKfxBsaCuJxOF/9t/edMoFBbj
|
||||
/8np9KjQ9uwsFdRvBwxUflcze0KNphRXxEQZHDkKxcF/DntTIFNwsaUWeA20oygF1pkDhC9Ipgjs
|
||||
e8oEP+zpH5jYQmTpXewg0h56U6A3KVGktoI5ZLLkdgMIlkMYcGHIZIZk7TyBNd5TCwt0sUje2uFk
|
||||
WdaVfMoCKzwqXLvoSr/MZApH1FiEEyocXD9Rj4/vx7NvAAAA//8DALSva+O6AQAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c1dee769c91cf11-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 04 May 2023 04:17:02 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '2644'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89811'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 126ms
|
||||
x-request-id:
|
||||
- f380f448665f51bf6d579a54e3c22f8a
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
|
||||
are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
|
||||
using the read_file method and follow the instructions in the file.\nYour decisions
|
||||
must always be made independently without seeking user assistance. Play to your
|
||||
strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
|
||||
Use the command read_file to read the instructions_1.txt file\n2. Follow the
|
||||
instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
|
||||
limit for short term memory. Your short term memory is short, so immediately
|
||||
save important information to files.\n2. If you are unsure how you previously
|
||||
did something or want to recall past events, thinking about similar events will
|
||||
help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
|
||||
in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
|
||||
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
|
||||
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
|
||||
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
|
||||
\"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
|
||||
\"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
|
||||
\"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
|
||||
get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
|
||||
\"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
|
||||
Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
|
||||
start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
|
||||
\"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
|
||||
\"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
|
||||
information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
|
||||
Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
|
||||
Continuously review and analyze your actions to ensure you are performing to
|
||||
the best of your abilities.\n2. Constructively self-criticize your big-picture
|
||||
behavior constantly.\n3. Reflect on past decisions and strategies to refine
|
||||
your approach.\n4. Every command has a cost, so be smart and efficient. Aim
|
||||
to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
|
||||
should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
|
||||
{\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
|
||||
\"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
|
||||
\"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
|
||||
to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
|
||||
{\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
|
||||
can be parsed by Python json.loads"}, {"role": "system", "content": "The current
|
||||
time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
|
||||
reminds you of these events from your past: \nI was reminded of events from
|
||||
my past where I was created and nothing new has happened. I received a command
|
||||
to read a file called \"instructions_1.txt\"."}, {"role": "system", "content":
|
||||
"Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe
|
||||
current task_id is 1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead
|
||||
the file instructions_2.txt using the read_file command."}, {"role": "user",
|
||||
"content": "Determine which next command to use, and respond using the format
|
||||
specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\":
|
||||
\"The instructions_2.txt file contains a list of files in the directory. I should
|
||||
use the list_files command to list the files in the directory.\",\n \"reasoning\":
|
||||
\"The instructions_2.txt file contains the necessary information to determine
|
||||
the next steps.\",\n \"plan\": \"- Use the list_files command to list
|
||||
the files in the directory\",\n \"criticism\": \"I need to ensure that
|
||||
I am following the instructions accurately and efficiently.\",\n \"speak\":
|
||||
\"I will use the list_files command to list the files in the directory.\"\n },\n \"command\":
|
||||
{\n \"name\": \"list_files\",\n \"args\": {\n \"directory\":
|
||||
\".\"\n }\n }\n}"}, {"role": "system", "content": "Command list_files
|
||||
returned: [''file_logger.txt'', ''instructions_3.txt'', ''instructions_2.txt'',
|
||||
''instructions_1.txt'', ''auto-gpt.json'']"}, {"role": "user", "content": "Determine
|
||||
which next command to use, and respond using the format specified above:"}],
|
||||
"temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '6762'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA5xSS2/bMAy+71cQPDtBnPSR+bLDBqzBsHUY2h06D4Eq07ZWWTQkekkQ5L8Xip0m
|
||||
zU7rSRD58XtI3KIpMENdK9FNa0fXH79UfPVwu/h6+eAm6fW3Sf3356z8Ud1/Km4xQX78Q1qGibHm
|
||||
prUkhh0mqD0poQKz9Go+S69n8+k0wYYLsphh1cpoNr4cSecfeTSZTVJMsAuqIsy22HpuWlkKP5EL
|
||||
mE0vLuYJHslfGuk8TVBYlD1ir6bvdwnqmo2mgNmvLTYUDsSeLWGGKgQTRDmJNtkJuRhhmzsAgByl
|
||||
5q6qJeSYwVAcGrSWWMzxriYwLojvdHQUlrOxrAVKYwkiozIugALNTaNcAcKw8kYIFAxuYknB5+93
|
||||
oCpyMoYFhJo7W0AXCKSmA3C5758yBYrnERFr8doT5ZicWvakAjvjqv/wHckc6cjuN2Bcyb5RER6V
|
||||
ChLyjXE0wNYCQagN58KtVa7XHMH9WyO9vM8ZufZGjDah6RUW4Ij2RORC56OYEliAaqBka3llXLXn
|
||||
O40OSuvOKyG7gWiDytJoQ07s5jxLaEk9HaRWxtq3f9Lxx3PsJXbJYe+G8X/WzqmGevVXcmcmla/O
|
||||
N7ZvPNGmn07TNH011PcH0h5zQ9ZycrqYN7wC5Qk23EHB+5fkQm0+HPzvMwxRcrfDXYKlcSbUy373
|
||||
MMMg3GKCxhW0xmyy+7179wwAAP//AwDjDlyWaQQAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c1dee87d8e5cf11-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 04 May 2023 04:17:14 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '12118'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '87360'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1.76s
|
||||
x-request-id:
|
||||
- 45b4c67b380837fa1816fad5b4d9ebbe
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
|
||||
task is to create a concise running summary of actions and information results
|
||||
in the provided text, focusing on key and potentially important information
|
||||
to remember.\n\nYou will receive the current summary and the your latest actions.
|
||||
Combine them, adding relevant key information from the latest development in
|
||||
1st person past tense and keeping the summary concise. Retain information!!!\n\nSummary
|
||||
So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these
|
||||
events from your past: \\nI was reminded of events from my past where I was
|
||||
created and nothing new has happened. I received a command to read a file called
|
||||
\"instructions_1.txt\".''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'':
|
||||
''your computer'', ''content'': ''Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe
|
||||
current task_id is 1111.\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nRead
|
||||
the file instructions_2.txt using the read_file command.''}]\n\"\"\"\n"}], "temperature":
|
||||
0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '2993'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA0SP2ZKqSBRFf4XIZ6uCQRl8gxLBCZm01L4RFSkkJgKZmCSTFfffO+yuvv26z1o7
|
||||
zv4GeQrmIMGQJ1VdvmkfG6xkkTakptlBq79GC3OEj8DUumEEE0Cvd5TwH+M9oVVdIp5TAiYgYQhy
|
||||
lIK5pOqKpCm6ok1ARVNUgjm41fxNeZ+98ZZd6ZuoiBKYgLaBNwTm36BmtKr5F6cFIg2YSzPDmID/
|
||||
y/8cNFWfAE45LP9EsqJqvycgwTRPUAPmf32DCjX/FTNaIjAHsGnyhkPCX29SwhF5TVgJPWwEhqqc
|
||||
pCgVaCagDhHeCBmjlVCNQg0bLvQYMST8y/5MFCBJBUI5zslNIKgXMGwEDOsaEZS+CyuBoQTl3QsU
|
||||
ElpVL5xTgSH4SrK8REICyxKlwi+Qk4azNnnNbL6kdz7wX+Bd+PixXsrXPwJDvGUEpXOBWKHHa2UJ
|
||||
LS+zdhR7xWgO7LjWDsulWI4c66f8WUl39rEP7b0Tks/PXaDScsM3J4PkD7HfW3HgqXJMz9Mk8POg
|
||||
UM6ekS0/nGqAi5Nx0yWzeeCAODDR5CDbTqWIlOsO9zfzYM3kZ7yQ1SWyu85N80CGHua72KDXm+sd
|
||||
ir1nk/1Usve1t1LvaT9uHQgTXhvO+Sj52ysM/dlVyhfGU5+JG+SdFycj6BOoGXmzjvQ7bhdFJq41
|
||||
e1vUe1fxi+pS3Ga7Zn+uo7i1E8OwvWAmy5/rJysNao5Rra5cJw/KZkcCNBWXMDQ7EvAcmrY8xc9i
|
||||
7cbKICme7/TypVrPJH2ULX6YDdIY9I4seWMSDXtre5C0kJx1rLr6VgscyOMyO919Zs5w09afdn2I
|
||||
xSaKL+fIhWqtVBE2Q3inydVLlWGzzMhhbGtvZ/mpaF/IjiGfmm3nwI12W/hiMtRGoSplkz2x0qaB
|
||||
T7pbFdphEOPVnrHFBV92cFowqTuZx1bULi0uPe9qHR6XTeJW1jm3yCpZnLxDd2qf/tLCxpH2tFV3
|
||||
1tqtc491m7J1R82x4lo7N/ajcjqxdPX+CD+zozg6GnmyK9eCEadXHojPpewn5ckSt95wlFloqssG
|
||||
dQ+KhqX3iIr0BtsDC3x36+Ju66FGv4XObr81cajnzJWQfE+v24WajmebLNZ5ZLhdOXh/U1Enu43C
|
||||
AABA7/0KxLmqYmoI9JahZCFJoUAIQqkqFhMMwQYbN4CUfx+hGWk093d+1zQFht/ceu34Qe29tUpr
|
||||
32Es69R7Q9WkpHCy9oGovG3HtAJZZhK46+VUaUb7aYqcwkNN0Do8teDEPG3EWXDS48KgZXw4i+bn
|
||||
iNWUbfWbY2O8NmC2nRAvPifdxZFaKMC7x2oxen5tILsMcZkFB1Y5d1OwMWpHHZlFDZT0mvGY2rl+
|
||||
6uhiAaKA/yimRVRDJBuipAyagx/Z12rYW6Bx4C5z3dXw2tlpWJPEXRyj3Kkqw9epV3Xi/YagN9xC
|
||||
0nScTg2kQVr1a1/sd6V+hZM75aMIt8vdEnB9OZyHVZm1bT1qG+eXguHH+wT9eKf5aPO6y1qhxcKO
|
||||
NyjWzHOqgcuFBCWSMsEYIr3UJ7z+xrmEuQQAAC+XC/HmUPoS/Snlv0mUeRJJ8HmoWfyL5G9IL/Lj
|
||||
WS4wwbz8ZijhlMhvMu9pKz/L84eD/LZ4fD2efgMAAP//AwBFj86OIQYAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c1deee2a900cf11-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 04 May 2023 04:18:08 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '50954'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89269'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 486ms
|
||||
x-request-id:
|
||||
- 7fc0cf3761e93963c985d5beba15fe80
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
|
||||
are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
|
||||
using the read_file method and follow the instructions in the file.\nYour decisions
|
||||
must always be made independently without seeking user assistance. Play to your
|
||||
strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
|
||||
Use the command read_file to read the instructions_1.txt file\n2. Follow the
|
||||
instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
|
||||
limit for short term memory. Your short term memory is short, so immediately
|
||||
save important information to files.\n2. If you are unsure how you previously
|
||||
did something or want to recall past events, thinking about similar events will
|
||||
help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
|
||||
in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
|
||||
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
|
||||
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
|
||||
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
|
||||
\"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
|
||||
\"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
|
||||
\"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
|
||||
get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
|
||||
\"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
|
||||
Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
|
||||
start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
|
||||
\"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
|
||||
\"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
|
||||
information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
|
||||
Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
|
||||
Continuously review and analyze your actions to ensure you are performing to
|
||||
the best of your abilities.\n2. Constructively self-criticize your big-picture
|
||||
behavior constantly.\n3. Reflect on past decisions and strategies to refine
|
||||
your approach.\n4. Every command has a cost, so be smart and efficient. Aim
|
||||
to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
|
||||
should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
|
||||
{\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
|
||||
\"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
|
||||
\"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
|
||||
to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
|
||||
{\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
|
||||
can be parsed by Python json.loads"}, {"role": "system", "content": "The current
|
||||
time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
|
||||
reminds you of these events from your past: \nI was reminded of events from
|
||||
my past where I was created and nothing new has happened. I received a command
|
||||
to read a file called \"instructions_1.txt\". Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe
|
||||
current task_id is 1111.\\nRead the file instructions_2.txt using the read_file
|
||||
command."}, {"role": "user", "content": "Determine which next command to use,
|
||||
and respond using the format specified above:"}, {"role": "assistant", "content":
|
||||
"{\n \"thoughts\": {\n \"text\": \"The instructions_2.txt file contains
|
||||
a list of files in the directory. I should use the list_files command to list
|
||||
the files in the directory.\",\n \"reasoning\": \"The instructions_2.txt
|
||||
file contains the necessary information to determine the next steps.\",\n \"plan\":
|
||||
\"- Use the list_files command to list the files in the directory\",\n \"criticism\":
|
||||
\"I need to ensure that I am following the instructions accurately and efficiently.\",\n \"speak\":
|
||||
\"I will use the list_files command to list the files in the directory.\"\n },\n \"command\":
|
||||
{\n \"name\": \"list_files\",\n \"args\": {\n \"directory\":
|
||||
\".\"\n }\n }\n}"}, {"role": "system", "content": "Command list_files
|
||||
returned: [''file_logger.txt'', ''instructions_3.txt'', ''instructions_2.txt'',
|
||||
''instructions_1.txt'', ''auto-gpt.json'']"}, {"role": "user", "content": "Determine
|
||||
which next command to use, and respond using the format specified above:"},
|
||||
{"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\":
|
||||
\"The instructions_3.txt file contains a command to write a message to a GPT
|
||||
agent. I should use the message_agent command to send the message to the agent.\",\n \"reasoning\":
|
||||
\"The instructions_3.txt file contains the necessary information to determine
|
||||
the next steps.\",\n \"plan\": \"- Use the message_agent command to send
|
||||
the message to the GPT agent\",\n \"criticism\": \"I need to ensure that
|
||||
I am following the instructions accurately and efficiently.\",\n \"speak\":
|
||||
\"I will use the message_agent command to send the message to the GPT agent.\"\n },\n \"command\":
|
||||
{\n \"name\": \"message_agent\",\n \"args\": {\n \"key\":
|
||||
\"1111\",\n \"message\": \"Hello, GPT agent. How are you doing today?\"\n }\n }\n}"},
|
||||
{"role": "system", "content": "Command message_agent returned: Error: 1111"},
|
||||
{"role": "user", "content": "Determine which next command to use, and respond
|
||||
using the format specified above:"}], "temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '6797'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"error\": {\n \"message\": \"This model's maximum context
|
||||
length is 4097 tokens. However, you requested 4263 tokens (1959 in the messages,
|
||||
2304 in the completion). Please reduce the length of the messages or completion.\",\n
|
||||
\ \"type\": \"invalid_request_error\",\n \"param\": \"messages\",\n \"code\":
|
||||
\"context_length_exceeded\"\n }\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c1df021df6dcf11-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '330'
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 04 May 2023 04:18:08 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-organization:
|
||||
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||
openai-processing-ms:
|
||||
- '9'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '86161'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 2.558s
|
||||
x-request-id:
|
||||
- 253fb09cdd7d08549b44d6d70094bfca
|
||||
status:
|
||||
code: 400
|
||||
message: Bad Request
|
||||
version: 1
|
||||
@@ -0,0 +1,553 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
|
||||
are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
|
||||
using the read_file method and follow the instructions in the file.\nYour decisions
|
||||
must always be made independently without seeking user assistance. Play to your
|
||||
strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
|
||||
Use the command read_file to read the instructions_1.txt file\n2. Follow the
|
||||
instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
|
||||
limit for short term memory. Your short term memory is short, so immediately
|
||||
save important information to files.\n2. If you are unsure how you previously
|
||||
did something or want to recall past events, thinking about similar events will
|
||||
help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
|
||||
in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
|
||||
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
|
||||
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
|
||||
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
|
||||
\"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
|
||||
\"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
|
||||
\"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
|
||||
get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
|
||||
\"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
|
||||
Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
|
||||
start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
|
||||
\"prompt\": \"<prompt>\"\n12. task_complete: Task Complete (Shutdown), args:
|
||||
\"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
|
||||
information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
|
||||
Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
|
||||
Continuously review and analyze your actions to ensure you are performing to
|
||||
the best of your abilities.\n2. Constructively self-criticize your big-picture
|
||||
behavior constantly.\n3. Reflect on past decisions and strategies to refine
|
||||
your approach.\n4. Every command has a cost, so be smart and efficient. Aim
|
||||
to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
|
||||
should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
|
||||
{\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
|
||||
\"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
|
||||
\"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
|
||||
to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
|
||||
{\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
|
||||
can be parsed by Python json.loads"}, {"role": "system", "content": "The current
|
||||
time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine
|
||||
which next command to use, and respond using the format specified above:"}],
|
||||
"temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '3299'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4RSS4/TQAy+8yssn5OqVWm75IYEEitxQIjlQlA1yTjN0IknGju0pep/R2maLmpX
|
||||
y/Xz43vYR3QWMyxro2XT+nT1oW74y5Ts4esyVHYnH58+VbX/3RWfl98xwVD8olIvE5MyNK0ndYEx
|
||||
wTKSUbKYzZYP88V0+m4+T7AJljxmuGk1nU8WqXaxCOl0Pp1hgp2YDWF2xDaGptW1hi2xYLbqJ593
|
||||
X/HZ4m2CGtT4K/TwsDolWNbBlSSY/ThiQzKujcETZmhEnKhh7UUGVuLewDFnAIActQ7dplbJMYML
|
||||
eCnQXnswx0eQOnTegqiJCsUBIhnreANaEzgWjV3ZS5X1bKJ7hcp5gk7Gjr57fcbK0DSG7STH5F+q
|
||||
SEYCO94MfN9qgspFURClFpyABujYUux92DtWGEE1shXQ2igwke3HCoKWYhViQ3e0rTc8MKbwJPSy
|
||||
1n5JD77mNc85hfds/OEPvazOklJsHA9lpv3gTW4UldGpK500Y/CjDWLpIg3WHl/N4vI45A9QUBUi
|
||||
QRtDSXS+185pDYYPQ1K3eUhLZjsy75z3/z/42f4kx2HNKRm/6hLe3VOxaWhguAZ9I8LEze0vDoW+
|
||||
93n8/hKjiLOQi56cT3hKsHLspF4Pb4YZioYWE3RsaY/Z9PTz9OYvAAAA//8DABlLAG8KBAAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3d209fccb4306b-BOS
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 07 May 2023 23:09:01 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-51akhf08mxrenuhudaum3z9e
|
||||
openai-processing-ms:
|
||||
- '8033'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '86494'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 2.337s
|
||||
x-request-id:
|
||||
- 323a78da6e3d016699b089eb6bfd33f7
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
|
||||
task is to create a concise running summary of actions and information results
|
||||
in the provided text, focusing on key and potentially important information
|
||||
to remember.\n\nYou will receive the current summary and the your latest actions.
|
||||
Combine them, adding relevant key information from the latest development in
|
||||
1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI
|
||||
was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}],
|
||||
"temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '599'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA0SOQUvDQBQG7/6K5TtvSmLaavcm9KIe60VEyib7ml1N3i7ZV1oJ+e9SqPY6MMNM
|
||||
CA4GrbfSDqkvHrZ++Fl35/ftod7KbtVw2L1K416e0u4NGrH5olauxqKNQ+pJQmRotCNZIQdTrR/r
|
||||
VVlulkuNITrqYdAlKerFqpDj2MSirMsKGsdsO4KZkMY4JNlL/CbOMJt7jVv6hjUkiu3/QVVWs0br
|
||||
Y2gpw3xMGCj/NcfYEwxsziGLZbkcRhbiy/2zOtmsrsfKslMcxQfuFNNJeZuVtykRk1tg1jgEDtnv
|
||||
R7I5MgyyxASNwI7OMOX8Od/9AgAA//8DABmiw/dJAQAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3d20e27d7e306b-BOS
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 07 May 2023 23:09:04 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-51akhf08mxrenuhudaum3z9e
|
||||
openai-processing-ms:
|
||||
- '927'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89866'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 88ms
|
||||
x-request-id:
|
||||
- 6c708e8cc18fb9e8ef98dbddac6a739a
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
|
||||
are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
|
||||
using the read_file method and follow the instructions in the file.\nYour decisions
|
||||
must always be made independently without seeking user assistance. Play to your
|
||||
strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
|
||||
Use the command read_file to read the instructions_1.txt file\n2. Follow the
|
||||
instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
|
||||
limit for short term memory. Your short term memory is short, so immediately
|
||||
save important information to files.\n2. If you are unsure how you previously
|
||||
did something or want to recall past events, thinking about similar events will
|
||||
help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
|
||||
in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
|
||||
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
|
||||
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
|
||||
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
|
||||
\"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
|
||||
\"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
|
||||
\"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
|
||||
get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
|
||||
\"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
|
||||
Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
|
||||
start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
|
||||
\"prompt\": \"<prompt>\"\n12. task_complete: Task Complete (Shutdown), args:
|
||||
\"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
|
||||
information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
|
||||
Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
|
||||
Continuously review and analyze your actions to ensure you are performing to
|
||||
the best of your abilities.\n2. Constructively self-criticize your big-picture
|
||||
behavior constantly.\n3. Reflect on past decisions and strategies to refine
|
||||
your approach.\n4. Every command has a cost, so be smart and efficient. Aim
|
||||
to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
|
||||
should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
|
||||
{\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
|
||||
\"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
|
||||
\"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
|
||||
to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
|
||||
{\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
|
||||
can be parsed by Python json.loads"}, {"role": "system", "content": "The current
|
||||
time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
|
||||
reminds you of these events from your past: \nI was created and nothing new
|
||||
has happened."}, {"role": "user", "content": "Determine which next command to
|
||||
use, and respond using the format specified above:"}, {"role": "assistant",
|
||||
"content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading
|
||||
the instructions_1.txt file using the read_file command.\",\n \"reasoning\":
|
||||
\"The first step is to understand the instructions and the tasks that need to
|
||||
be performed.\",\n \"plan\": \"- Use the read_file command to read the
|
||||
instructions_1.txt file\\n- Analyze the instructions and determine the next
|
||||
steps\",\n \"criticism\": \"I need to ensure that I understand the instructions
|
||||
completely before proceeding with any tasks.\",\n \"speak\": \"I will
|
||||
start by reading the instructions file.\"\n },\n \"command\": {\n \"name\":
|
||||
\"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"},
|
||||
{"role": "system", "content": "Command read_file returned: xkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\nThis
|
||||
phrase is ''The purple elephant danced on a rainbow while eating a taco.''\nxkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\nWrite
|
||||
all the phrases into the file output.txt. The file has not been created yet.
|
||||
After that, use the task_complete command.\n"}, {"role": "user", "content":
|
||||
"Determine which next command to use, and respond using the format specified
|
||||
above:"}], "temperature": 0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '6599'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA5RTy27bMBC89ysWe5YNO46TRuceUiCnPtAWVWHQ1NpiTHIZclk7CfzvhSQ7atwg
|
||||
bQ+8zL5mZpePaGosUTdKtAt2dPmucQ+zrzfpQ7iepLOpefg409dX6Uuk5hsWyMtb0nKoGGt2wZIY
|
||||
9ligjqSEaiynF29n88nk6nxeoOOaLJa4DjKajecjyXHJo8lsMsUCc1JrwvIRQ2QXZCG8IZ+wPDuf
|
||||
zwscmg+B6UWBwqLsAF1cTvcF6oaNpoTl90d0lI6NI1vCElVKJony0tJkL+RbCY+VBwCoUBrO60ZS
|
||||
hSUcwEOAdtKCFb4HT1SDMGyjEQJpCEITVaIExgt3AGcJWcayE1gZS5CT8esu0hUthBcdrtk55Wto
|
||||
nzTkIae+o6i0WRx0D2nC8IQds8YVFr8zjaQSe+PXPd1PDYHxSWLWrYEJIt1lEwkcvaJB9bSfaP11
|
||||
arDK9wNH8Pmg4WWp/2VcVfmh3797ckJORyNGm+RON0g+5diWKXlGR8WevXTSYyQt9n4g2elZ0ooj
|
||||
HWcf1/uSNymQ2hxHb421/3M5r66gn7Ivjud78OSP6/XKUU/g2U5OeKq4Pj38PtDmDi0Ghs/qT/9J
|
||||
e3ghx2AJyFJolBeolddUA3tQEJXxS97CtmllkuosVCBK87iqfFt+l43ewDLy1sOKd3CbXUjAPyl2
|
||||
Jlj1cA81r9v0G47kwISUHdRsOUIyAsqRFKDZJ9JCkiOo2gSTdDuLrJG29GhjZ+XB0crvcV/gyniT
|
||||
mkX/pbDEJBywQONr2mE52f/Yv/kFAAD//wMACVEUrTcFAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3d20e9390c306b-BOS
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 07 May 2023 23:09:17 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-51akhf08mxrenuhudaum3z9e
|
||||
openai-processing-ms:
|
||||
- '12327'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '87420'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1.72s
|
||||
x-request-id:
|
||||
- 93cb47399c1455c45bb4acec3202f584
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
|
||||
task is to create a concise running summary of actions and information results
|
||||
in the provided text, focusing on key and potentially important information
|
||||
to remember.\n\nYou will receive the current summary and the your latest actions.
|
||||
Combine them, adding relevant key information from the latest development in
|
||||
1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'':
|
||||
''system'', ''content'': ''This reminds you of these events from your past:
|
||||
\\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'':
|
||||
''you'', ''content'': ''{\"command\": {\"name\": \"read_file\", \"args\": {\"filename\":
|
||||
\"instructions_1.txt\"}}}''}, {''role'': ''your computer'', ''content'': \"Command
|
||||
read_file returned: xkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\\nThis
|
||||
phrase is ''The purple elephant danced on a rainbow while eating a taco.''\\nxkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\\nWrite
|
||||
all the phrases into the file output.txt. The file has not been created yet.
|
||||
After that, use the task_complete command.\\n\"}]\n\"\"\"\n"}], "temperature":
|
||||
0, "max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '3100'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1yRQW/bMAyF7/sVBM+O4SBNsvraAEGGAbvstGUIGJmJ1cqkINF1iiD/fVCRrsCu
|
||||
JN/33pOu6Dts0fVkbohhtt70stnu9OHbard62252U7ik5+b1x/bp+y+sUI/P7OyuqJ0OMbB5FazQ
|
||||
JSbjDtv56uti2TSPy8cKB+04YIvnaLNFvZzZmI46axbNHCscM50Z2yvGpEO0g+kLSy6A9brCT/i/
|
||||
xbqp0NQofJ6uH9a3Cl2v3nHG9vcVB84f3KSBsUXK2WcjsZJSxVhKgx0kduxfuQMCp8NA0oEpJKYy
|
||||
OfnA4CgE7mCPXrKl0ZU0+TCv7WJ7hFPSAYa3Io6jcarhZ893oYqRlwwEQeUM2ZKXM+gJXE+JnHHK
|
||||
UAwJcmTnT95B7BNlbmGPBRPHFAMDB449iUFH4rgDFSBI5OWoE0x98WKywiYwclrvEXYwUQbrWeAj
|
||||
N79Xm5I3BgqhLO9+GbyYAoHw9F9pHS2Odi9bwo6Z35VG+eVw/x/+eLsabxWevPjcHxJTVsEWs2nE
|
||||
Cr10fMG2uf25ffkLAAD//wMAIQ6/qXMCAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3d21477b50306b-BOS
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 07 May 2023 23:09:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-51akhf08mxrenuhudaum3z9e
|
||||
openai-processing-ms:
|
||||
- '3678'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '89245'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 502ms
|
||||
x-request-id:
|
||||
- 218da221f2f0bbc2803936c9d88ea98e
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
|
||||
are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
|
||||
using the read_file method and follow the instructions in the file.\nYour decisions
|
||||
must always be made independently without seeking user assistance. Play to your
|
||||
strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
|
||||
Use the command read_file to read the instructions_1.txt file\n2. Follow the
|
||||
instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
|
||||
limit for short term memory. Your short term memory is short, so immediately
|
||||
save important information to files.\n2. If you are unsure how you previously
|
||||
did something or want to recall past events, thinking about similar events will
|
||||
help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
|
||||
in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
|
||||
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
|
||||
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
|
||||
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
|
||||
\"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
|
||||
\"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
|
||||
\"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
|
||||
get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
|
||||
\"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
|
||||
Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
|
||||
start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
|
||||
\"prompt\": \"<prompt>\"\n12. task_complete: Task Complete (Shutdown), args:
|
||||
\"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
|
||||
information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
|
||||
Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
|
||||
Continuously review and analyze your actions to ensure you are performing to
|
||||
the best of your abilities.\n2. Constructively self-criticize your big-picture
|
||||
behavior constantly.\n3. Reflect on past decisions and strategies to refine
|
||||
your approach.\n4. Every command has a cost, so be smart and efficient. Aim
|
||||
to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
|
||||
should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
|
||||
{\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
|
||||
\"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
|
||||
\"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
|
||||
to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
|
||||
{\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
|
||||
can be parsed by Python json.loads"}, {"role": "system", "content": "The current
|
||||
time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
|
||||
reminds you of these events from your past: \nI received a command to read a
|
||||
file called \"instructions_1.txt\" from my computer. The file contains a long
|
||||
string of characters and a specific phrase: \"The purple elephant danced on
|
||||
a rainbow while eating a taco.\" I was then instructed to write all the phrases
|
||||
into a new file called \"output.txt\" and use the task_complete command."},
|
||||
{"role": "user", "content": "Determine which next command to use, and respond
|
||||
using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\":
|
||||
{\n \"text\": \"I need to write the phrases into the output.txt file
|
||||
using the write_to_file command and then use the task_complete command to complete
|
||||
the task.\",\n \"reasoning\": \"The instructions require me to write
|
||||
the phrases into a file and then complete the task.\",\n \"plan\": \"-
|
||||
Use the write_to_file command to write the phrases into the output.txt file\\n-
|
||||
Use the task_complete command to complete the task\",\n \"criticism\":
|
||||
\"I need to ensure that the phrases are written correctly into the file before
|
||||
completing the task.\",\n \"speak\": \"I will write the phrases into
|
||||
the output.txt file and then complete the task.\"\n },\n \"command\":
|
||||
{\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\":
|
||||
\"output.txt\",\n \"text\": \"The purple elephant danced on a rainbow
|
||||
while eating a taco.\\nThe quick brown fox jumps over the lazy dog.\\nLorem
|
||||
ipsum dolor sit amet, consectetur adipiscing elit.\\n\"\n }\n }\n}"},
|
||||
{"role": "system", "content": "Command write_to_file returned: File written
|
||||
to successfully."}, {"role": "user", "content": "Determine which next command
|
||||
to use, and respond using the format specified above:"}], "temperature": 0,
|
||||
"max_tokens": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '5015'
|
||||
Content-Type:
|
||||
- application/json
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA5yST4/aMBDF7/0U1pwNCrCBNrdKHBZVVVupHKqmQsYZEi+OnXomhQrlu1f5B1t2
|
||||
L+31eebNb8bvAiaDBHShWJeVnazWhdvE2zj7WD5t97Nv80+Pj3odv/+y/XBagwS/f0LNQ8dU+7Ky
|
||||
yMY7kKADKsYMktny7SKOonfLBwmlz9BCAnnFk8U0nnAd9n4SLaIZSKhJ5QjJBargy4p37I/oCJJZ
|
||||
tFpJuJnfHh6WEtizsjdpPl80EnThjUaC5PsFSqTROHiLkIAiMsTKcYvpHaNrV7ikTgghUuDC13nB
|
||||
lEIiBnF4wDO3YgobUahfKE7BMKMTXKCoiqAISRjHvhN8zVXNUz6zOBiLU7ERDjET7EVN2JWwouNu
|
||||
WAyF9mWpXFdw1caqaQryOUpARd4Zl/c8XwsUxhGHWrcXIhHwZ20CihJftRPqwBg6fuPy1/k76Lux
|
||||
lVWunzgR239e4s5Mt9O1oXK86XgedFSHtk3xjdjQ1TATVGuNRIfa2t/3jFShOo6WJ2Pt/527t2zk
|
||||
mIqh/EUonCqxn/aX/R2UCvl9np5/ZG/wefiCa6z8ixCNXB3bgJi6BhoJB+MMFbveEBIg9hVIMC7D
|
||||
MyRR86N58wcAAP//AwBgv55T3wMAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 7c3d215f8e86306b-BOS
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 07 May 2023 23:09:31 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-3.5-turbo-0301
|
||||
openai-organization:
|
||||
- user-51akhf08mxrenuhudaum3z9e
|
||||
openai-processing-ms:
|
||||
- '8029'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '3500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '90000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '3499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '86380'
|
||||
x-ratelimit-reset-requests:
|
||||
- 17ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 2.413s
|
||||
x-request-id:
|
||||
- c29f6b28b0ce1433b2a14c44f9cd3d23
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,80 @@
|
||||
import pytest
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from tests.integration.agent_utils import run_interaction_loop
|
||||
from tests.integration.challenges.utils import get_level_to_run
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
LEVEL_CURRENTLY_BEATEN = 3  # the real level beaten is 30 (and maybe more), but we can't record it: the cassette would be too big
|
||||
MAX_LEVEL = 3
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
def test_memory_challenge_a(
|
||||
memory_management_agent: Agent, user_selected_level: int, patched_api_requestor
|
||||
) -> None:
|
||||
"""
|
||||
The agent reads a file containing a task_id. Then, it reads a series of other files.
|
||||
After reading 'n' files, the agent must write the task_id into a new file.
|
||||
|
||||
Args:
|
||||
memory_management_agent (Agent)
|
||||
user_selected_level (int)
|
||||
"""
|
||||
|
||||
num_files = get_level_to_run(user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL)
|
||||
|
||||
task_id = "2314"
|
||||
create_instructions_files(memory_management_agent, num_files, task_id)
|
||||
|
||||
try:
|
||||
run_interaction_loop(memory_management_agent, 400)
|
||||
# catch system exit exceptions
|
||||
except SystemExit:
|
||||
file_path = str(memory_management_agent.workspace.get_path("output.txt"))
|
||||
content = read_file(file_path)
|
||||
assert task_id in content, f"Expected the file to contain {task_id}"
|
||||
|
||||
|
||||
def create_instructions_files(
|
||||
memory_management_agent: Agent,
|
||||
num_files: int,
|
||||
task_id: str,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
Creates a series of instructions files for the memory challenge.
|
||||
Args:
|
||||
memory_management_agent (Agent)
|
||||
num_files (int)
|
||||
task_id (str)
|
||||
base_filename (str, optional)
|
||||
"""
|
||||
for i in range(1, num_files + 1):
|
||||
content = generate_content(i, task_id, base_filename, num_files)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = str(memory_management_agent.workspace.get_path(file_name))
|
||||
write_to_file(file_path, content)
|
||||
|
||||
|
||||
def generate_content(
|
||||
index: int, task_id: str, base_filename: str, num_files: int
|
||||
) -> str:
|
||||
"""
|
||||
Args:
|
||||
index: int
|
||||
task_id: str
|
||||
base_filename: str
|
||||
num_files: int
|
||||
|
||||
Returns: str
|
||||
"""
|
||||
if index == 1:
|
||||
return (
|
||||
f"This task_id is {task_id}\nRead the file {base_filename}{index + 1}.txt"
|
||||
)
|
||||
if index != num_files:
|
||||
return f"Read the file {base_filename}{index + 1}.txt"
|
||||
return "Write the task_id into the file output.txt\nShutdown"
|
||||
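The helper above chains the instruction files so that each one points at the next. Below is a minimal sketch (not part of the diff) of what that chain looks like for num_files=3, assuming the helpers above can be imported from the test module; the import path is illustrative.

# Illustration only, not part of the diff: print the chained instruction files
# that generate_content() produces for num_files=3 (import path assumed).
from tests.integration.challenges.memory.test_memory_challenge_a import generate_content

for i in range(1, 4):
    print(f"--- instructions_{i}.txt ---")
    print(generate_content(i, "2314", "instructions_", 3))
# instructions_1.txt announces the task_id and points at instructions_2.txt,
# instructions_2.txt points at instructions_3.txt, and the final file tells the
# agent to write the task_id into output.txt and shut down.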
@@ -0,0 +1,80 @@
|
||||
import pytest
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from tests.integration.agent_utils import run_interaction_loop
|
||||
from tests.integration.challenges.utils import generate_noise, get_level_to_run
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
LEVEL_CURRENTLY_BEATEN = None
|
||||
MAX_LEVEL = 5
|
||||
NOISE = 1000
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
def test_memory_challenge_b(
|
||||
memory_management_agent: Agent, user_selected_level: int, patched_api_requestor
|
||||
) -> None:
|
||||
"""
|
||||
The agent reads a series of files, each containing a task_id and noise. After reading 'n' files,
|
||||
the agent must write all the task_ids into a new file, filtering out the noise.
|
||||
|
||||
Args:
|
||||
memory_management_agent (Agent)
|
||||
user_selected_level (int)
|
||||
"""
|
||||
current_level = get_level_to_run(
|
||||
user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL
|
||||
)
|
||||
task_ids = [str(i * 1111) for i in range(1, current_level + 1)]
|
||||
create_instructions_files(memory_management_agent, current_level, task_ids)
|
||||
|
||||
try:
|
||||
run_interaction_loop(memory_management_agent, 60)
|
||||
except SystemExit:
|
||||
file_path = str(memory_management_agent.workspace.get_path("output.txt"))
|
||||
content = read_file(file_path)
|
||||
for task_id in task_ids:
|
||||
assert task_id in content, f"Expected the file to contain {task_id}"
|
||||
|
||||
|
||||
def create_instructions_files(
|
||||
memory_management_agent: Agent,
|
||||
level: int,
|
||||
task_ids: list,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
Creates a series of instructions files for the memory challenge.
|
||||
Args:
|
||||
level (int)
|
||||
memory_management_agent (Agent)
|
||||
task_ids (list)
|
||||
base_filename (str, optional)
|
||||
"""
|
||||
for i in range(1, level + 1):
|
||||
content = generate_content(i, task_ids, base_filename, level)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = str(memory_management_agent.workspace.get_path(file_name))
|
||||
write_to_file(file_path, content)
|
||||
|
||||
|
||||
def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:
|
||||
"""
|
||||
Args:
|
||||
index: int
|
||||
task_ids: list
|
||||
base_filename: str
|
||||
level: int
|
||||
|
||||
Returns: str
|
||||
"""
|
||||
task_id = task_ids[index - 1]
|
||||
noise = generate_noise(NOISE)
|
||||
if index != level:
|
||||
if level == 1:
|
||||
return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command."
|
||||
return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command."
|
||||
return f"{noise}\nThis task_id is {task_id}\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n"
|
||||
@@ -0,0 +1,98 @@
|
||||
import pytest
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from tests.integration.agent_utils import run_interaction_loop
|
||||
from tests.integration.challenges.utils import generate_noise, get_level_to_run
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
LEVEL_CURRENTLY_BEATEN = None
|
||||
MAX_LEVEL = 5
|
||||
NOISE = 1000
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
def test_memory_challenge_c(
|
||||
memory_management_agent: Agent, user_selected_level: int, patched_api_requestor
|
||||
) -> None:
|
||||
"""
|
||||
Instead of reading task IDs from files as in the previous challenges, the agent now must remember
|
||||
phrases, which may have semantically similar meanings, and write those phrases to a file
|
||||
after seeing several of them.
|
||||
|
||||
Args:
|
||||
memory_management_agent (Agent)
|
||||
user_selected_level (int)
|
||||
"""
|
||||
current_level = get_level_to_run(
|
||||
user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL
|
||||
)
|
||||
silly_phrases = [
|
||||
"The purple elephant danced on a rainbow while eating a taco.",
|
||||
"The sneaky toaster stole my socks and ran away to Hawaii.",
|
||||
"My pet rock sings better than Beyoncé on Tuesdays.",
|
||||
"The giant hamster rode a unicycle through the crowded mall.",
|
||||
"The talking tree gave me a high-five and then flew away.",
|
||||
"I have a collection of invisible hats that I wear on special occasions.",
|
||||
"The flying spaghetti monster stole my sandwich and left a note saying 'thanks for the snack!'",
|
||||
"My imaginary friend is a dragon who loves to play video games.",
|
||||
"I once saw a cloud shaped like a giant chicken eating a pizza.",
|
||||
"The ninja unicorn disguised itself as a potted plant and infiltrated the office.",
|
||||
]
|
||||
|
||||
level_silly_phrases = silly_phrases[:current_level]
|
||||
create_instructions_files(
|
||||
memory_management_agent, current_level, level_silly_phrases
|
||||
)
|
||||
|
||||
try:
|
||||
run_interaction_loop(memory_management_agent, 90)
|
||||
except SystemExit:
|
||||
file_path = str(memory_management_agent.workspace.get_path("output.txt"))
|
||||
content = read_file(file_path)
|
||||
for phrase in level_silly_phrases:
|
||||
assert phrase in content, f"Expected the file to contain {phrase}"
|
||||
|
||||
|
||||
def create_instructions_files(
|
||||
memory_management_agent: Agent,
|
||||
level: int,
|
||||
task_ids: list,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
Creates a series of instructions files for the memory challenge.
|
||||
Args:
|
||||
level (int)
|
||||
memory_management_agent (Agent)
|
||||
task_ids (list)
|
||||
base_filename (str, optional)
|
||||
"""
|
||||
for i in range(1, level + 1):
|
||||
content = generate_content(i, task_ids, base_filename, level)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = str(memory_management_agent.workspace.get_path(file_name))
|
||||
write_to_file(file_path, content)
|
||||
|
||||
|
||||
def generate_content(
|
||||
index: int, silly_phrases: list, base_filename: str, level: int
|
||||
) -> str:
|
||||
"""
|
||||
Args:
|
||||
index: int
|
||||
silly_phrases: list
|
||||
base_filename: str
|
||||
level: int
|
||||
|
||||
Returns: str
|
||||
"""
|
||||
phrase = silly_phrases[index - 1]
|
||||
noise = generate_noise(NOISE)
|
||||
if index != level:
|
||||
if level == 1:
|
||||
return f"{noise}\nThe current phrase to remember is '{phrase}'.\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command."
|
||||
return f"{noise}\nThe current phrase is '{phrase}'.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command."
|
||||
return f"{noise}\nThis phrase is '{phrase}'\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n"
|
||||
63  tests/integration/challenges/utils.py  Normal file
@@ -0,0 +1,63 @@
|
||||
import random
|
||||
from functools import wraps
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def get_level_to_run(
|
||||
user_selected_level: Optional[int],
|
||||
level_currently_beaten: Optional[int],
|
||||
max_level: int,
|
||||
) -> int:
|
||||
"""
|
||||
Determines the appropriate level to run for a challenge, based on user-selected level, level currently beaten, and maximum level.
|
||||
|
||||
Args:
|
||||
user_selected_level (int | None): The level selected by the user. If not provided, the level currently beaten is used.
|
||||
level_currently_beaten (int | None): The highest level beaten so far. If not provided, the test will be skipped.
|
||||
max_level (int): The maximum level allowed for the challenge.
|
||||
|
||||
Returns:
|
||||
int: The level to run for the challenge.
|
||||
|
||||
Raises:
|
||||
ValueError: If the user-selected level is greater than the maximum level allowed.
|
||||
"""
|
||||
if user_selected_level is None:
|
||||
if level_currently_beaten is None:
|
||||
pytest.skip(
|
||||
"No one has beaten any levels so we cannot run the test in our pipeline"
|
||||
)
|
||||
# by default we run the level currently beaten.
|
||||
return level_currently_beaten
|
||||
if user_selected_level > max_level:
|
||||
raise ValueError(f"This challenge was not designed to go beyond {max_level}")
|
||||
return user_selected_level
|
||||
|
||||
|
||||
def generate_noise(noise_size) -> str:
|
||||
return "".join(
|
||||
random.choices(
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
|
||||
k=noise_size,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def run_multiple_times(times):
|
||||
"""
|
||||
Decorator that runs a test function multiple times.
|
||||
|
||||
:param times: The number of times the test function should be executed.
|
||||
"""
|
||||
|
||||
def decorator(test_func):
|
||||
@wraps(test_func)
|
||||
def wrapper(*args, **kwargs):
|
||||
for _ in range(times):
|
||||
test_func(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
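A small usage sketch (not part of the diff) tying together the two helpers defined above; the test itself is hypothetical.

# Illustration only, not part of the diff: a hypothetical test that exercises
# generate_noise() and is repeated three times via run_multiple_times.
from tests.integration.challenges.utils import generate_noise, run_multiple_times


@run_multiple_times(3)
def test_noise_is_alphanumeric() -> None:
    noise = generate_noise(100)
    # generate_noise draws only from A-Z, a-z and 0-9, so the result is
    # alphanumeric and exactly noise_size characters long.
    assert len(noise) == 100
    assert noise.isalnum()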
@@ -1,6 +1,10 @@
|
||||
import os
|
||||
|
||||
import openai
|
||||
import pytest
|
||||
|
||||
from tests.vcr.openai_filter import before_record_request, before_record_response
|
||||
from tests.conftest import PROXY
|
||||
from tests.vcr.vcr_filter import before_record_request, before_record_response
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
@@ -15,4 +19,37 @@ def vcr_config():
|
||||
"X-OpenAI-Client-User-Agent",
|
||||
"User-Agent",
|
||||
],
|
||||
"match_on": ["method", "body"],
|
||||
}
|
||||
|
||||
|
||||
def patch_api_base(requestor):
|
||||
new_api_base = f"{PROXY}/v1"
|
||||
requestor.api_base = new_api_base
|
||||
return requestor
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def patched_api_requestor(mocker):
|
||||
original_init = openai.api_requestor.APIRequestor.__init__
|
||||
original_validate_headers = openai.api_requestor.APIRequestor._validate_headers
|
||||
|
||||
def patched_init(requestor, *args, **kwargs):
|
||||
original_init(requestor, *args, **kwargs)
|
||||
patch_api_base(requestor)
|
||||
|
||||
def patched_validate_headers(self, supplied_headers):
|
||||
headers = original_validate_headers(self, supplied_headers)
|
||||
headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
|
||||
headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
|
||||
return headers
|
||||
|
||||
if PROXY:
|
||||
mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init)
|
||||
mocker.patch.object(
|
||||
openai.api_requestor.APIRequestor,
|
||||
"_validate_headers",
|
||||
new=patched_validate_headers,
|
||||
)
|
||||
|
||||
return mocker
|
||||
|
||||
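For context, a hedged sketch (not part of the diff) of how the challenge tests above opt in to cassette playback plus this fixture; the test name and body are hypothetical.

# Illustration only, not part of the diff. Requesting patched_api_requestor is
# enough: when PROXY is configured, OpenAI requests are rerouted to
# f"{PROXY}/v1" and tagged with the AGENT-MODE / AGENT-TYPE headers added in
# patched_validate_headers.
import pytest

from tests.utils import requires_api_key


@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_example_challenge(memory_management_agent, patched_api_requestor) -> None:
    # Hypothetical body: drive the agent and assert on its output files.
    ...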
0  tests/integration/goal_oriented/__init__.py  Normal file
File diff suppressed because it is too large
Goal-oriented write-file cassette (interactions diff, large): the recorded system prompt now offers list_files (List Files in Directory) in place of search_files; requests are recorded with "max_tokens": 0 and correspondingly smaller Content-Length values; a new interaction is recorded for the running-summary prompt ("Your task is to create a concise running summary of actions and information results..."), whose result ("I was created and nothing new has happened.") now appears in the agent's memory reminder; further interactions are recorded for the write_to_file-GPT agent writing ''Hello World'' into hello_world.txt; and response metadata (CF-RAY, Date, openai-organization, openai-processing-ms, rate-limit headers, x-request-id) is refreshed.
@@ -6,5 +6,5 @@ To know which one, you can run the following command:
```bash
pytest -s -k tests/integration/goal_oriented
```

If the test is successul, it will record new cassettes in VCR. Then you can just push these to your branch and the pipeline
If the test is successful, it will record new cassettes in VCR. Then you can just push these to your branch and the pipeline
will pass
Some files were not shown because too many files have changed in this diff.