Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 00:28:31 -05:00)

Compare commits: docker-upd… → updated-do… (48 commits)
Commits (SHA1):
e94654aa11, e9de43b0d2, de8add92e7, 597cbdcc58, e3f35d79c7, 0040495143, d3eac86f9a, c3cb90ac20, 9b5bf81d7c, 86db4deef9, d8f989daf8, 00f2b134cb, a3959712dc, 8477b25c5a, f133c9c1ef, dc72ec97bc, 0c915cb558, f6ab15db47, 80161decb9, 0bf8edcd96, b1347a92de, 22ce8e0047, 5a7193cfb7, c1f301ab8b, f32244a112, 9395706841, a98677b79d, 056eb46c0f, 6fde030c37, bf1e01d423, 52c731abd6, c8fbce643e, 6c001bd595, f5b89672f8, 76480ffa03, ab60a57379, 1d9b01fc77, e81d9f9f0b, 0d5d0270ea, bd25f9223c, 07305b55ff, cdfe3e5fbc, e992cdf8c2, ebd2ecd84c, 0b919522ae, ef691359b7, f8815c3053, a60ed21404
@@ -37,3 +37,4 @@ rnd/autogpt_builder/.env.local
rnd/autogpt_server/.env
rnd/autogpt_server/.venv/
+rnd/market/.env
30 .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -6,26 +6,18 @@
<!-- Concisely describe all of the changes made in this pull request: -->

-### PR Quality Scorecard ✨
+### Testing 🔍

+> [!NOTE]
+> Only for the new autogpt platform, currently in rnd/

<!--
-Check out our contribution guide:
-https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
-1. Avoid duplicate work, issues, PRs etc.
-2. Also consider contributing something other than code; see the [contribution guide] for options.
-3. Clearly explain your changes.
-4. Avoid making unnecessary changes, especially if they're purely based on personal preferences. Doing so is the maintainers' job. ;-)
+Please make sure your changes have been tested and are in good working condition.
+Here is a list of our critical paths, if you need some inspiration on what and how to test:
-->

-- [x] Have you used the PR description template?   `+2 pts`
-- [ ] Is your pull request atomic, focusing on a single change?   `+5 pts`
-- [ ] Have you linked the GitHub issue(s) that this PR addresses?   `+5 pts`
-- [ ] Have you documented your changes clearly and comprehensively?   `+5 pts`
-- [ ] Have you changed or added a feature?   `-4 pts`
-- [ ] Have you added/updated corresponding documentation?   `+4 pts`
-- [ ] Have you added/updated corresponding integration tests?   `+5 pts`
-- [ ] Have you changed the behavior of AutoGPT?   `-5 pts`
-- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance?   `+10 pts`

+- Create from scratch and execute an agent with at least 3 blocks
+- Import an agent from file upload, and confirm it executes correctly
+- Upload agent to marketplace
+- Import an agent from marketplace and confirm it executes correctly
+- Edit an agent from monitor, and confirm it executes correctly
41 .github/workflows/autogpt-server-docker.yml (vendored, new file)
@@ -0,0 +1,41 @@
name: AutoGPT Server Docker Build & Push

on:
  push:
    branches: [ updated-docker-ci ]
    paths:
      - '**'

defaults:
  run:
    shell: bash
    working-directory: AutoGPT

env:
  PROJECT_ID: agpt-dev
  IMAGE_NAME: agpt-server-dev
  REGION: us-central1

jobs:
  build-and-push:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v0.2.1
        with:
          project_id: ${{ env.PROJECT_ID }}
          service_account_key: ${{ secrets.GCP_SA_KEY }}
          export_default_credentials: true

      - name: Configure Docker
        run: gcloud auth configure-docker ${{ env.REGION }}-docker.pkg.dev

      - name: Build Docker image
        run: docker build -t ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.IMAGE_NAME }}:${{ github.sha }} -f rnd/autogpt_server/Dockerfile .

      - name: Push Docker image
        run: docker push ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
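For reference, the same build-and-push can be sketched locally with the workflow's values substituted in (assuming `gcloud` is already authenticated for the `agpt-dev` project; `GIT_SHA` stands in for `github.sha`):

```sh
GIT_SHA=$(git rev-parse HEAD)
gcloud auth configure-docker us-central1-docker.pkg.dev
docker build -t us-central1-docker.pkg.dev/agpt-dev/agpt-server-dev:$GIT_SHA -f rnd/autogpt_server/Dockerfile .
docker push us-central1-docker.pkg.dev/agpt-dev/agpt-server-dev:$GIT_SHA
```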
3 .gitmodules (vendored)
@@ -1,3 +1,6 @@
[submodule "forge/tests/vcr_cassettes"]
	path = forge/tests/vcr_cassettes
	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
+[submodule "rnd/supabase"]
+	path = rnd/supabase
+	url = https://github.com/supabase/supabase.git
@@ -185,7 +185,7 @@ If you don't know which to choose, you can safely go with OpenAI*.
1. Get your Groq API key from [Settings > API keys][groq/api-keys]
2. Open `.env`
3. Find the line that says `GROQ_API_KEY=`
-4. Insert your Anthropic API Key directly after = without quotes or spaces:
+4. Insert your Groq API Key directly after = without quotes or spaces:
   ```ini
   GROQ_API_KEY=gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
   ```
@@ -69,6 +69,8 @@ Lets the agent execute non-interactive Shell commands and Python code. Python ex
| `shell_denylist` | List of prohibited shell commands | `List[str]` | `[]` |
+| `docker_container_name` | Name of the Docker container used for code execution | `str` | `"agent_sandbox"` |

+All shell command configurations are expected to be for convenience only. This component is not secure and should not be used in production environments. It is recommended to use more appropriate sandboxing.

### CommandProvider

- `execute_shell` execute shell command
@@ -73,6 +73,7 @@ Once you have installed Yarn and Poetry, you can run the following command to in

```bash
cd rnd/autogpt_server
+cp .env.example .env
poetry install
```
@@ -90,7 +91,7 @@ Once you have installed the dependencies, you can proceed to the next step.

In order to setup the database, you need to run the following commands, in the same terminal you ran the `poetry install` command:

```sh
-docker compose up postgres -d
+docker compose up postgres redis -d
poetry run prisma migrate dev
```

After deploying the migration, to ensure that the database schema is correctly mapped to your codebase, allowing the application to interact with the database properly, you need to generate the Prisma database model:
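```sh
poetry run prisma generate
```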
@@ -101,16 +102,17 @@ poetry run prisma generate

Without running this command, the necessary Python modules (prisma.models) won't be available, leading to a `ModuleNotFoundError`.

### Running the server

To run the server, you can run the following commands in the same terminal you ran the `poetry install` command:

```bash
-docker compose build
-docker compose up
+cp supabase/docker/.env.example .env
+docker compose -f docker-compose.combined.yml build
+docker compose -f docker-compose.combined.yml up -d
```

-In the other terminal, you can run the following command to start the frontend:
+In the other terminal from autogpt_builder, you can run the following command to start the frontend:

```bash
yarn dev
```
@@ -119,3 +121,10 @@ yarn dev

### Checking if the server is running

You can check if the server is running by visiting [http://localhost:3000](http://localhost:3000) in your browser.

+### Notes:
+By default the daemons for different services run on the following ports:
+
+Execution Manager Daemon: 8002
+Execution Scheduler Daemon: 8003
+Rest Server Daemon: 8004
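A quick way to check that a daemon is listening, assuming the default ports above:

```sh
curl -I http://localhost:8004  # REST server daemon; any HTTP response means it is up
```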
157 rnd/README.md
@@ -1,36 +1,133 @@
-This is a guide to setting up and running the AutoGPT Server and Builder. This tutorial will cover downloading the necessary files, setting up the server, and testing the system.
-
-https://github.com/user-attachments/assets/fd0d0f35-3155-4263-b575-ba3efb126cb4
-
-1. Navigate to the AutoGPT GitHub repository.
-2. Click the "Code" button, then select "Download ZIP".
-3. Once downloaded, extract the ZIP file to a folder of your choice.
-4. Open the extracted folder and navigate to the "rnd" directory.
-5. Enter the "AutoGPT server" folder.
-6. Open a terminal window in this directory.
-7. Locate and open the README file in the AutoGPT server folder: [doc](./autogpt_server/README.md#setup).
-8. Copy and paste each command from the setup section in the README into your terminal.
-   - Important: Wait for each command to finish before running the next one.
-9. If all commands run without errors, enter the final command: `poetry run app`
-10. You should now see the server running in your terminal.

# AutoGPT Platform

Welcome to the AutoGPT Platform - a powerful system for creating and running AI agents to solve business problems. This platform enables you to harness the power of artificial intelligence to automate tasks, analyze data, and generate insights for your organization.

## Getting Started

### Prerequisites

- Docker
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)

### Running the System

To run the AutoGPT Platform, follow these steps:

1. Clone this repository to your local machine.
2. Navigate to rnd/supabase
3. Run the following command:
   ```
   git submodule update --init --recursive
   ```
4. Navigate back to rnd (cd ..)
5. Run the following command:
   ```
   cp supabase/docker/.env.example .env
   ```
6. Run the following command:
   ```
   docker compose -f docker-compose.combined.yml up -d
   ```
   This command will start all the necessary backend services defined in the `docker-compose.combined.yml` file in detached mode (a quick status check follows these steps).
7. Navigate to rnd/autogpt_builder.
8. Run the following command:
   ```
   cp .env.example .env.local
   ```
9. Run the following command:
   ```
   yarn dev
   ```
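Before moving on to the builder, you can confirm the backend services came up; this is the same status check used in the scenarios below:

```sh
docker compose -f docker-compose.combined.yml ps
```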
### Docker Compose Commands

Here are some useful Docker Compose commands for managing your AutoGPT Platform:

- `docker compose -f docker-compose.combined.yml up -d`: Start the services in detached mode.
- `docker compose -f docker-compose.combined.yml stop`: Stop the running services without removing them.
- `docker compose rm`: Remove stopped service containers.
- `docker compose build`: Build or rebuild services.
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.

### Sample Scenarios

Here are some common scenarios where you might use multiple Docker Compose commands:

1. Updating and restarting a specific service:
   ```
   docker compose build api_srv
   docker compose up -d --no-deps api_srv
   ```
   This rebuilds the `api_srv` service and restarts it without affecting other services.

2. Viewing logs for troubleshooting:
   ```
   docker compose logs -f api_srv ws_srv
   ```
   This shows and follows the logs for both `api_srv` and `ws_srv` services.

3. Scaling a service for increased load:
   ```
   docker compose up -d --scale executor=3
   ```
   This scales the `executor` service to 3 instances to handle increased load.

4. Stopping the entire system for maintenance:
   ```
   docker compose stop
   docker compose rm -f
   docker compose pull
   docker compose up -d
   ```
   This stops all services, removes containers, pulls the latest images, and restarts the system.

5. Developing with live updates:
   ```
   docker compose watch
   ```
   This watches for changes in your code and automatically updates the relevant services.

6. Checking the status of services:
   ```
   docker compose ps
   ```
   This shows the current status of all services defined in your docker-compose.yml file.

These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.

### Persisting Data

To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:

1. Open the `docker-compose.yml` file in a text editor.
2. Add volume configurations for PostgreSQL and Redis services:

   ```yaml
   services:
     postgres:
       # ... other configurations ...
       volumes:
         - postgres_data:/var/lib/postgresql/data

     redis:
       # ... other configurations ...
       volumes:
         - redis_data:/data

   volumes:
     postgres_data:
     redis_data:
   ```

3. Save the file and run `docker compose up -d` to apply the changes.

This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
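After a restart you can confirm the named volumes survived; note that Docker may prefix them with the compose project name:

```sh
docker volume ls
```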
-11. Navigate back to the "rnd" folder.
-12. Open the "AutoGPT builder" folder.
-13. Open the README file in this folder: [doc](./autogpt_builder/README.md#getting-started).
-14. In your terminal, run the following commands:
-    ```
-    npm install
-    ```
-    ```
-    npm run dev
-    ```
-15. Once the front-end is running, click the link to navigate to `localhost:3000`.
-16. Click on the "Build" option.
-17. Add a few blocks to test the functionality.
-18. Connect the blocks together.
-19. Click "Run".
-20. Check your terminal window - you should see that the server has received the request, is processing it, and has executed it.
-
-And there you have it! You've successfully set up and tested AutoGPT.
@@ -1,12 +1,12 @@
-NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8000/api
+NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:8006/auth/callback
+NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
-NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8001/api/v1/market
+NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market

## Supabase credentials
## YOU ONLY NEED THEM IF YOU WANT TO USE SUPABASE USER AUTHENTICATION
## If you're using self-hosted version then you most likely don't need to set this
-# NEXT_PUBLIC_SUPABASE_URL=your-project-url
-# NEXT_PUBLIC_SUPABASE_ANON_KEY=your-anon-key
+NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000
+NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE

+## OAuth Callback URL
+## This should be {domain}/auth/callback
3 rnd/autogpt_builder/.gitignore (vendored)
@@ -34,3 +34,6 @@ yarn-error.log*
# typescript
*.tsbuildinfo
next-env.d.ts
+
+# Sentry Config File
+.env.sentry-build-plugin
@@ -1,19 +1,19 @@
# Base stage for both dev and prod
FROM node:21-alpine AS base
WORKDIR /app
-COPY autogpt_builder/package.json autogpt_builder/yarn.lock ./
+COPY rnd/autogpt_builder/package.json rnd/autogpt_builder/yarn.lock ./
RUN yarn install --frozen-lockfile

# Dev stage
FROM base AS dev
ENV NODE_ENV=development
-COPY autogpt_builder/ .
+COPY rnd/autogpt_builder/ .
EXPOSE 3000
-CMD ["npm", "run", "dev"]
+CMD ["yarn", "run", "dev"]

# Build stage for prod
FROM base AS build
-COPY autogpt_builder/ .
+COPY rnd/autogpt_builder/ .
RUN npm run build

# Prod stage
@@ -1,3 +1,4 @@
+import { withSentryConfig } from "@sentry/nextjs";
import dotenv from "dotenv";

// Load environment variables
@@ -28,4 +29,56 @@ const nextConfig = {
  },
};

-export default nextConfig;
+export default withSentryConfig(nextConfig, {
+  // For all available options, see:
+  // https://github.com/getsentry/sentry-webpack-plugin#options
+
+  org: "significant-gravitas",
+  project: "builder",
+
+  // Only print logs for uploading source maps in CI
+  silent: !process.env.CI,
+
+  // For all available options, see:
+  // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
+
+  // Upload a larger set of source maps for prettier stack traces (increases build time)
+  widenClientFileUpload: true,
+
+  // Automatically annotate React components to show their full name in breadcrumbs and session replay
+  reactComponentAnnotation: {
+    enabled: true,
+  },
+
+  // Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers.
+  // This can increase your server load as well as your hosting bill.
+  // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
+  // side errors will fail.
+  tunnelRoute: "/monitoring",
+
+  // Hides source maps from generated client bundles
+  hideSourceMaps: true,
+
+  // Automatically tree-shake Sentry logger statements to reduce bundle size
+  disableLogger: true,
+
+  // Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.)
+  // See the following for more information:
+  // https://docs.sentry.io/product/crons/
+  // https://vercel.com/docs/cron-jobs
+  automaticVercelMonitors: true,
+
+  async headers() {
+    return [
+      {
+        source: "/:path*",
+        headers: [
+          {
+            key: "Document-Policy",
+            value: "js-profiling",
+          },
+        ],
+      },
+    ];
+  },
+});
@@ -27,6 +27,7 @@
    "@radix-ui/react-switch": "^1.1.0",
    "@radix-ui/react-toast": "^1.2.1",
    "@radix-ui/react-tooltip": "^1.1.2",
+    "@sentry/nextjs": "^8",
    "@supabase/ssr": "^0.4.0",
    "@supabase/supabase-js": "^2.45.0",
    "@tanstack/react-table": "^8.20.5",
57 rnd/autogpt_builder/sentry.client.config.ts (new file)
@@ -0,0 +1,57 @@
// This file configures the initialization of Sentry on the client.
// The config you add here will be used whenever a user loads a page in their browser.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/

import * as Sentry from "@sentry/nextjs";

Sentry.init({
  dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",

  // Add optional integrations for additional features
  integrations: [
    Sentry.replayIntegration(),
    Sentry.httpClientIntegration(),
    Sentry.replayCanvasIntegration(),
    Sentry.reportingObserverIntegration(),
    Sentry.browserProfilingIntegration(),
    // Sentry.feedbackIntegration({
    //   // Additional SDK configuration goes in here, for example:
    //   colorScheme: "system",
    // }),
  ],

  // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
  tracesSampleRate: 1,

  // Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
  tracePropagationTargets: [
    "localhost",
    /^https:\/\/dev\-builder\.agpt\.co\/api/,
  ],

  beforeSend(event, hint) {
    // Check if it is an exception, and if so, show the report dialog
    if (event.exception && event.event_id) {
      Sentry.showReportDialog({ eventId: event.event_id });
    }
    return event;
  },

  // Define how likely Replay events are sampled.
  // This sets the sample rate to be 10%. You may want this to be 100% while
  // in development and sample at a lower rate in production
  replaysSessionSampleRate: 0.1,

  // Define how likely Replay events are sampled when an error occurs.
  replaysOnErrorSampleRate: 1.0,

  // Setting this option to true will print useful information to the console while you're setting up Sentry.
  debug: false,

  // Set profilesSampleRate to 1.0 to profile every transaction.
  // Since profilesSampleRate is relative to tracesSampleRate,
  // the final profiling rate can be computed as tracesSampleRate * profilesSampleRate
  // For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
  // result in 25% of transactions being profiled (0.5*0.5=0.25)
  profilesSampleRate: 1.0,
});
16 rnd/autogpt_builder/sentry.edge.config.ts (new file)
@@ -0,0 +1,16 @@
// This file configures the initialization of Sentry for edge features (middleware, edge routes, and so on).
// The config you add here will be used whenever one of the edge features is loaded.
// Note that this config is unrelated to the Vercel Edge Runtime and is also required when running locally.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/

import * as Sentry from "@sentry/nextjs";

Sentry.init({
  dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",

  // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
  tracesSampleRate: 1,

  // Setting this option to true will print useful information to the console while you're setting up Sentry.
  debug: false,
});
23 rnd/autogpt_builder/sentry.server.config.ts (new file)
@@ -0,0 +1,23 @@
// This file configures the initialization of Sentry on the server.
// The config you add here will be used whenever the server handles a request.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/

import * as Sentry from "@sentry/nextjs";
// import { NodeProfilingIntegration } from "@sentry/profiling-node";

Sentry.init({
  dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",

  // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
  tracesSampleRate: 1,

  // Setting this option to true will print useful information to the console while you're setting up Sentry.
  debug: false,

  // Integrations
  integrations: [
    Sentry.anrIntegration(),
    // NodeProfilingIntegration,
    // Sentry.fsIntegration(),
  ],
});
27 rnd/autogpt_builder/src/app/global-error.tsx (new file)
@@ -0,0 +1,27 @@
"use client";

import * as Sentry from "@sentry/nextjs";
import NextError from "next/error";
import { useEffect } from "react";

export default function GlobalError({
  error,
}: {
  error: Error & { digest?: string };
}) {
  useEffect(() => {
    Sentry.captureException(error);
  }, [error]);

  return (
    <html>
      <body>
        {/* `NextError` is the default Next.js error page component. Its type
        definition requires a `statusCode` prop. However, since the App Router
        does not expose status codes for errors, we simply pass 0 to render a
        generic error message. */}
        <NextError statusCode={0} />
      </body>
    </html>
  );
}
@@ -3,6 +3,7 @@ import { revalidatePath } from "next/cache";
import { redirect } from "next/navigation";
import { createServerClient } from "@/lib/supabase/server";
import { z } from "zod";
+import * as Sentry from "@sentry/nextjs";

const loginFormSchema = z.object({
  email: z.string().email().min(2).max(64),
@@ -10,45 +11,54 @@ const loginFormSchema = z.object({
});

export async function login(values: z.infer<typeof loginFormSchema>) {
-  const supabase = createServerClient();
+  return await Sentry.withServerActionInstrumentation("login", {}, async () => {
+    const supabase = createServerClient();

-  if (!supabase) {
-    redirect("/error");
-  }
+    if (!supabase) {
+      redirect("/error");
+    }

-  // We are sure that the values are of the correct type because zod validates the form
-  const { data, error } = await supabase.auth.signInWithPassword(values);
+    // We are sure that the values are of the correct type because zod validates the form
+    const { data, error } = await supabase.auth.signInWithPassword(values);

-  if (error) {
-    return error.message;
-  }
+    if (error) {
+      return error.message;
+    }

-  if (data.session) {
-    await supabase.auth.setSession(data.session);
-  }
+    if (data.session) {
+      await supabase.auth.setSession(data.session);
+    }

-  revalidatePath("/", "layout");
-  redirect("/profile");
+    revalidatePath("/", "layout");
+    redirect("/profile");
+  });
}

export async function signup(values: z.infer<typeof loginFormSchema>) {
-  const supabase = createServerClient();
+  "use server";
+  return await Sentry.withServerActionInstrumentation(
+    "signup",
+    {},
+    async () => {
+      const supabase = createServerClient();

-  if (!supabase) {
-    redirect("/error");
-  }
+      if (!supabase) {
+        redirect("/error");
+      }

-  // We are sure that the values are of the correct type because zod validates the form
-  const { data, error } = await supabase.auth.signUp(values);
+      // We are sure that the values are of the correct type because zod validates the form
+      const { data, error } = await supabase.auth.signUp(values);

-  if (error) {
-    return error.message;
-  }
+      if (error) {
+        return error.message;
+      }

-  if (data.session) {
-    await supabase.auth.setSession(data.session);
-  }
+      if (data.session) {
+        await supabase.auth.setSession(data.session);
+      }

-  revalidatePath("/", "layout");
-  redirect("/profile");
+      revalidatePath("/", "layout");
+      redirect("/profile");
+    },
+  );
}
@@ -7,7 +7,7 @@ import AgentDetailContent from "@/components/marketplace/AgentDetailContent";
async function getAgentDetails(id: string): Promise<AgentDetailResponse> {
  const apiUrl =
    process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
-    "http://localhost:8001/api/v1/market";
+    "http://localhost:8015/api/v1/market";
  const api = new MarketplaceAPI(apiUrl);
  try {
    console.log(`Fetching agent details for id: ${id}`);
@@ -185,7 +185,7 @@ const Pagination: React.FC<{
const Marketplace: React.FC = () => {
  const apiUrl =
    process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
-    "http://localhost:8001/api/v1/market";
+    "http://localhost:8015/api/v1/market";
  const api = useMemo(() => new MarketplaceAPI(apiUrl), [apiUrl]);

  const [searchValue, setSearchValue] = useState("");
32 rnd/autogpt_builder/src/components/CreditButton.tsx (new file)
@@ -0,0 +1,32 @@
"use client";

import { useState, useEffect } from "react";
import { Button } from "@/components/ui/button";
import { IconRefresh } from "@/components/ui/icons";
import AutoGPTServerAPI from "@/lib/autogpt-server-api";

export default function CreditButton() {
  const [credit, setCredit] = useState<number | null>(null);
  const api = new AutoGPTServerAPI();

  const fetchCredit = async () => {
    const response = await api.getUserCredit();
    setCredit(response.credits);
  };
  useEffect(() => {
    fetchCredit();
  }, [api]);

  return (
    credit !== null && (
      <Button
        onClick={fetchCredit}
        variant="outline"
        className="flex items-center space-x-2 text-muted-foreground"
      >
        <span>Credits: {credit}</span>
        <IconRefresh />
      </Button>
    )
  );
}
@@ -12,8 +12,11 @@ import InputModalComponent from "./InputModalComponent";
import OutputModalComponent from "./OutputModalComponent";
import {
  BlockIORootSchema,
+  BlockIOStringSubSchema,
  Category,
  NodeExecutionResult,
+  BlockUIType,
+  BlockCost,
} from "@/lib/autogpt-server-api/types";
import { beautifyString, cn, setNestedProperty } from "@/lib/utils";
import { Button } from "@/components/ui/button";
@@ -21,7 +24,10 @@ import { Switch } from "@/components/ui/switch";
import { Copy, Trash2 } from "lucide-react";
import { history } from "./history";
import NodeHandle from "./NodeHandle";
-import { NodeGenericInputField } from "./node-input-components";
+import {
+  NodeGenericInputField,
+  NodeTextBoxInput,
+} from "./node-input-components";
import SchemaTooltip from "./SchemaTooltip";
import { getPrimaryCategoryColor } from "@/lib/utils";
import { FlowContext } from "./Flow";
@@ -40,6 +46,7 @@ export type ConnectionData = Array<{

export type CustomNodeData = {
  blockType: string;
+  blockCosts: BlockCost[];
  title: string;
  description: string;
  categories: Category[];
@@ -59,6 +66,7 @@ export type CustomNodeData = {
  backend_id?: string;
  errors?: { [key: string]: string };
  isOutputStatic?: boolean;
+  uiType: BlockUIType;
};

export type CustomNode = Node<CustomNodeData, "custom">;
@@ -118,8 +126,16 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
    setIsAdvancedOpen(checked);
  };

-  const generateOutputHandles = (schema: BlockIORootSchema) => {
-    if (!schema?.properties) return null;
+  const generateOutputHandles = (
+    schema: BlockIORootSchema,
+    nodeType: BlockUIType,
+  ) => {
+    if (
+      !schema?.properties ||
+      nodeType === BlockUIType.OUTPUT ||
+      nodeType === BlockUIType.NOTE
+    )
+      return null;
    const keys = Object.keys(schema.properties);
    return keys.map((key) => (
      <div key={key}>
@@ -133,6 +149,137 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
    ));
  };

+  const generateInputHandles = (
+    schema: BlockIORootSchema,
+    nodeType: BlockUIType,
+  ) => {
+    if (!schema?.properties) return null;
+    let keys = Object.entries(schema.properties);
+    switch (nodeType) {
+      case BlockUIType.INPUT:
+        // For INPUT blocks, dont include connection handles
+        return keys.map(([propKey, propSchema]) => {
+          const isRequired = data.inputSchema.required?.includes(propKey);
+          const isConnected = isHandleConnected(propKey);
+          const isAdvanced = propSchema.advanced;
+          return (
+            (isRequired || isAdvancedOpen || !isAdvanced) && (
+              <div key={propKey}>
+                <span className="text-m green -mb-1 text-gray-900">
+                  {propSchema.title || beautifyString(propKey)}
+                </span>
+                <div key={propKey} onMouseOver={() => {}}>
+                  {!isConnected && (
+                    <NodeGenericInputField
+                      className="mb-2 mt-1"
+                      propKey={propKey}
+                      propSchema={propSchema}
+                      currentValue={getValue(propKey)}
+                      connections={data.connections}
+                      handleInputChange={handleInputChange}
+                      handleInputClick={handleInputClick}
+                      errors={data.errors ?? {}}
+                      displayName={propSchema.title || beautifyString(propKey)}
+                    />
+                  )}
+                </div>
+              </div>
+            )
+          );
+        });
+
+      case BlockUIType.NOTE:
+        // For NOTE blocks, don't render any input handles
+        const [noteKey, noteSchema] = keys[0];
+        return (
+          <div key={noteKey}>
+            <NodeTextBoxInput
+              className=""
+              selfKey={noteKey}
+              schema={noteSchema as BlockIOStringSubSchema}
+              value={getValue(noteKey)}
+              handleInputChange={handleInputChange}
+              handleInputClick={handleInputClick}
+              error={data.errors?.[noteKey] ?? ""}
+              displayName={noteSchema.title || beautifyString(noteKey)}
+            />
+          </div>
+        );
+
+      case BlockUIType.OUTPUT:
+        // For OUTPUT blocks, only show the 'value' property
+        return keys.map(([propKey, propSchema]) => {
+          const isRequired = data.inputSchema.required?.includes(propKey);
+          const isConnected = isHandleConnected(propKey);
+          const isAdvanced = propSchema.advanced;
+          return (
+            (isRequired || isAdvancedOpen || !isAdvanced) && (
+              <div key={propKey} onMouseOver={() => {}}>
+                {propKey !== "value" ? (
+                  <span className="text-m green -mb-1 text-gray-900">
+                    {propSchema.title || beautifyString(propKey)}
+                  </span>
+                ) : (
+                  <NodeHandle
+                    keyName={propKey}
+                    isConnected={isConnected}
+                    isRequired={isRequired}
+                    schema={propSchema}
+                    side="left"
+                  />
+                )}
+                {!isConnected && (
+                  <NodeGenericInputField
+                    className="mb-2 mt-1"
+                    propKey={propKey}
+                    propSchema={propSchema}
+                    currentValue={getValue(propKey)}
+                    connections={data.connections}
+                    handleInputChange={handleInputChange}
+                    handleInputClick={handleInputClick}
+                    errors={data.errors ?? {}}
+                    displayName={propSchema.title || beautifyString(propKey)}
+                  />
+                )}
+              </div>
+            )
+          );
+        });
+
+      default:
+        return keys.map(([propKey, propSchema]) => {
+          const isRequired = data.inputSchema.required?.includes(propKey);
+          const isConnected = isHandleConnected(propKey);
+          const isAdvanced = propSchema.advanced;
+          return (
+            (isRequired || isAdvancedOpen || isConnected || !isAdvanced) && (
+              <div key={propKey} onMouseOver={() => {}}>
+                <NodeHandle
+                  keyName={propKey}
+                  isConnected={isConnected}
+                  isRequired={isRequired}
+                  schema={propSchema}
+                  side="left"
+                />
+                {!isConnected && (
+                  <NodeGenericInputField
+                    className="mb-2 mt-1"
+                    propKey={propKey}
+                    propSchema={propSchema}
+                    currentValue={getValue(propKey)}
+                    connections={data.connections}
+                    handleInputChange={handleInputChange}
+                    handleInputClick={handleInputClick}
+                    errors={data.errors ?? {}}
+                    displayName={propSchema.title || beautifyString(propKey)}
+                  />
+                )}
+              </div>
+            )
+          );
+        });
+    }
+  };
  const handleInputChange = (path: string, value: any) => {
    const keys = parseKeys(path);
    const newValues = JSON.parse(JSON.stringify(data.hardcodedValues));
@@ -376,15 +523,27 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
    );
  });

+  const inputValues = data.hardcodedValues;
+  const blockCost =
+    data.blockCosts &&
+    data.blockCosts.find((cost) =>
+      Object.entries(cost.cost_filter).every(
+        // Undefined, null, or empty values are considered equal
+        ([key, value]) =>
+          value === inputValues[key] || (!value && !inputValues[key]),
+      ),
+    );
+  console.debug(`Block cost ${inputValues}|${data.blockCosts}=${blockCost}`);
+
  return (
    <div
-      className={`${blockClasses} ${errorClass} ${statusClass}`}
+      className={`${data.uiType === BlockUIType.NOTE ? "w-[300px]" : "w-[500px]"} ${blockClasses} ${errorClass} ${statusClass} ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : "bg-white"}`}
      onMouseEnter={handleHovered}
      onMouseLeave={handleMouseLeave}
      data-id={`custom-node-${id}`}
    >
      <div
-        className={`mb-2 p-3 ${getPrimaryCategoryColor(data.categories)} rounded-t-xl`}
+        className={`mb-2 p-3 ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : getPrimaryCategoryColor(data.categories)} rounded-t-xl`}
      >
        <div className="flex items-center justify-between">
          <div className="font-roboto p-3 text-lg font-semibold">
@@ -417,53 +576,29 @@
          )}
        </div>
      </div>
-      <div className="flex items-start justify-between gap-2 p-3">
-        <div>
-          {data.inputSchema &&
-            Object.entries(data.inputSchema.properties).map(
-              ([propKey, propSchema]) => {
-                const isRequired = data.inputSchema.required?.includes(propKey);
-                const isConnected = isHandleConnected(propKey);
-                const isAdvanced = propSchema.advanced;
-                return (
-                  (isRequired ||
-                    isAdvancedOpen ||
-                    isConnected ||
-                    !isAdvanced) && (
-                    <div key={propKey} onMouseOver={() => {}}>
-                      <NodeHandle
-                        keyName={propKey}
-                        isConnected={isConnected}
-                        isRequired={isRequired}
-                        schema={propSchema}
-                        side="left"
-                      />
-                      {!isConnected && (
-                        <NodeGenericInputField
-                          className="mb-2 mt-1"
-                          propKey={propKey}
-                          propSchema={propSchema}
-                          currentValue={getValue(propKey)}
-                          connections={data.connections}
-                          handleInputChange={handleInputChange}
-                          handleInputClick={handleInputClick}
-                          errors={data.errors ?? {}}
-                          displayName={
-                            propSchema.title || beautifyString(propKey)
-                          }
-                        />
-                      )}
-                    </div>
-                  )
-                );
-              },
-            )}
-        </div>
-        <div className="flex-none">
-          {data.outputSchema && generateOutputHandles(data.outputSchema)}
-        </div>
-      </div>
-      {isOutputOpen && (
+      {blockCost && (
+        <div className="p-3 text-right font-semibold">
+          Cost: {blockCost.cost_amount} / {blockCost.cost_type}
+        </div>
+      )}
+      {data.uiType !== BlockUIType.NOTE ? (
+        <div className="flex items-start justify-between p-3">
+          <div>
+            {data.inputSchema &&
+              generateInputHandles(data.inputSchema, data.uiType)}
+          </div>
+          <div className="flex-none">
+            {data.outputSchema &&
+              generateOutputHandles(data.outputSchema, data.uiType)}
+          </div>
+        </div>
+      ) : (
+        <div>
+          {data.inputSchema &&
+            generateInputHandles(data.inputSchema, data.uiType)}
+        </div>
+      )}
+      {isOutputOpen && data.uiType !== BlockUIType.NOTE && (
        <div
          data-id="latest-output"
          className="nodrag m-3 break-words rounded-md border-[1.5px] p-2"
@@ -486,25 +621,27 @@
          )}
        </div>
      )}
-      <div className="mt-2.5 flex items-center pb-4 pl-4">
-        <Switch checked={isOutputOpen} onCheckedChange={toggleOutput} />
-        <span className="m-1 mr-4">Output</span>
-        {hasAdvancedFields && (
-          <>
-            <Switch onCheckedChange={toggleAdvancedSettings} />
-            <span className="m-1">Advanced</span>
-          </>
-        )}
-        {data.status && (
-          <Badge
-            variant="outline"
-            data-id={`badge-${id}-${data.status}`}
-            className={cn(data.status.toLowerCase(), "ml-auto mr-5")}
-          >
-            {data.status}
-          </Badge>
-        )}
-      </div>
+      {data.uiType !== BlockUIType.NOTE && (
+        <div className="mt-2.5 flex items-center pb-4 pl-4">
+          <Switch checked={isOutputOpen} onCheckedChange={toggleOutput} />
+          <span className="m-1 mr-4">Output</span>
+          {hasAdvancedFields && (
+            <>
+              <Switch onCheckedChange={toggleAdvancedSettings} />
+              <span className="m-1">Advanced</span>
+            </>
+          )}
+          {data.status && (
+            <Badge
+              variant="outline"
+              data-id={`badge-${id}-${data.status}`}
+              className={cn(data.status.toLowerCase(), "ml-auto mr-5")}
+            >
+              {data.status}
+            </Badge>
+          )}
+        </div>
+      )}
      <InputModalComponent
        title={activeKey ? `Enter ${beautifyString(activeKey)}` : undefined}
        isOpen={isModalOpen}
@@ -27,7 +27,7 @@ import "@xyflow/react/dist/style.css";
import { CustomNode } from "./CustomNode";
import "./flow.css";
import { Link } from "@/lib/autogpt-server-api";
-import { getTypeColor } from "@/lib/utils";
+import { getTypeColor, filterBlocksByType } from "@/lib/utils";
import { history } from "./history";
import { CustomEdge } from "./CustomEdge";
import ConnectionLine from "./ConnectionLine";
@@ -36,14 +36,19 @@ import { SaveControl } from "@/components/edit/control/SaveControl";
import { BlocksControl } from "@/components/edit/control/BlocksControl";
import {
  IconPlay,
-  IconUndo2,
  IconRedo2,
  IconSquare,
+  IconUndo2,
+  IconOutput,
} from "@/components/ui/icons";
import { startTutorial } from "./tutorial";
import useAgentGraph from "@/hooks/useAgentGraph";
import { v4 as uuidv4 } from "uuid";
import { useRouter, usePathname, useSearchParams } from "next/navigation";
+import { LogOut } from "lucide-react";
+import RunnerUIWrapper, {
+  RunnerUIWrapperRef,
+} from "@/components/RunnerUIWrapper";

// This is for the history, this is the minimum distance a block must move before it is logged
// It helps to prevent spamming the history with small movements especially when pressing on a input in a block
@@ -101,6 +106,8 @@ const FlowEditor: React.FC<{
  // State to control if blocks menu should be pinned open
  const [pinBlocksPopover, setPinBlocksPopover] = useState(false);

+  const runnerUIRef = useRef<RunnerUIWrapperRef>(null);
+
  useEffect(() => {
    const params = new URLSearchParams(window.location.search);
@@ -407,6 +414,7 @@ const FlowEditor: React.FC<{
      position: viewportCenter, // Set the position to the calculated viewport center
      data: {
        blockType: nodeType,
+        blockCosts: nodeSchema.costs,
        title: `${nodeType} ${nodeId}`,
        description: nodeSchema.description,
        categories: nodeSchema.categories,
@@ -417,6 +425,7 @@ const FlowEditor: React.FC<{
        isOutputOpen: false,
        block_id: blockId,
        isOutputStatic: nodeSchema.staticOutput,
+        uiType: nodeSchema.uiType,
      },
    };
@@ -549,9 +558,21 @@ const FlowEditor: React.FC<{
      onClick: handleRedo,
    },
    {
-      label: !isRunning ? "Run" : "Stop",
+      label: !savedAgent
+        ? "Please save the agent to run"
+        : !isRunning
+          ? "Run"
+          : "Stop",
      icon: !isRunning ? <IconPlay /> : <IconSquare />,
-      onClick: !isRunning ? requestSaveAndRun : requestStopRun,
+      onClick: !isRunning
+        ? () => runnerUIRef.current?.runOrOpenInput()
+        : requestStopRun,
+      disabled: !savedAgent,
+    },
+    {
+      label: "Runner Output",
+      icon: <LogOut size={18} strokeWidth={1.8} />,
+      onClick: () => runnerUIRef.current?.openRunnerOutput(),
    },
  ];
@@ -587,12 +608,21 @@ const FlowEditor: React.FC<{
          <SaveControl
            agentMeta={savedAgent}
            onSave={(isTemplate) => requestSave(isTemplate ?? false)}
+            agentDescription={agentDescription}
+            onDescriptionChange={setAgentDescription}
+            agentName={agentName}
+            onNameChange={setAgentName}
          />
        </ControlPanel>
      </ReactFlow>
    </div>
+    <RunnerUIWrapper
+      ref={runnerUIRef}
+      nodes={nodes}
+      setNodes={setNodes}
+      isRunning={isRunning}
+      requestSaveAndRun={requestSaveAndRun}
+    />
  </FlowContext.Provider>
);
};
@@ -9,9 +9,12 @@ import {
  IconCircleUser,
  IconMenu,
  IconPackage2,
+  IconRefresh,
  IconSquareActivity,
  IconWorkFlow,
} from "@/components/ui/icons";
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
+import CreditButton from "@/components/CreditButton";

export async function NavBar() {
  const isAvailable = Boolean(
@@ -96,6 +99,8 @@ export async function NavBar() {
        </a>
      </div>
      <div className="flex flex-1 items-center justify-end gap-4">
+        {isAvailable && user && <CreditButton />}
+
        {isAvailable && !user && (
          <Link
            href="/login"
141 rnd/autogpt_builder/src/components/RunnerUIWrapper.tsx (new file)
@@ -0,0 +1,141 @@
import React, {
  useState,
  useCallback,
  forwardRef,
  useImperativeHandle,
} from "react";
import RunnerInputUI from "./runner-ui/RunnerInputUI";
import RunnerOutputUI from "./runner-ui/RunnerOutputUI";
import { Node } from "@xyflow/react";
import { filterBlocksByType } from "@/lib/utils";
import { BlockIORootSchema } from "@/lib/autogpt-server-api/types";

interface RunnerUIWrapperProps {
  nodes: Node[];
  setNodes: React.Dispatch<React.SetStateAction<Node[]>>;
  isRunning: boolean;
  requestSaveAndRun: () => void;
}

export interface RunnerUIWrapperRef {
  openRunnerInput: () => void;
  openRunnerOutput: () => void;
  runOrOpenInput: () => void;
}

const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
  ({ nodes, setNodes, isRunning, requestSaveAndRun }, ref) => {
    const [isRunnerInputOpen, setIsRunnerInputOpen] = useState(false);
    const [isRunnerOutputOpen, setIsRunnerOutputOpen] = useState(false);

    const getBlockInputsAndOutputs = useCallback(() => {
      const inputBlocks = filterBlocksByType(
        nodes,
        (node) => node.data.block_id === "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      );

      const outputBlocks = filterBlocksByType(
        nodes,
        (node) => node.data.block_id === "363ae599-353e-4804-937e-b2ee3cef3da4",
      );

      const inputs = inputBlocks.map((node) => ({
        id: node.id,
        type: "input" as const,
        inputSchema: node.data.inputSchema as BlockIORootSchema,
        hardcodedValues: {
          name: (node.data.hardcodedValues as any).name || "",
          description: (node.data.hardcodedValues as any).description || "",
          value: (node.data.hardcodedValues as any).value,
          placeholder_values:
            (node.data.hardcodedValues as any).placeholder_values || [],
          limit_to_placeholder_values:
            (node.data.hardcodedValues as any).limit_to_placeholder_values ||
            false,
        },
      }));

      const outputs = outputBlocks.map((node) => ({
        id: node.id,
        type: "output" as const,
        outputSchema: node.data.outputSchema as BlockIORootSchema,
        hardcodedValues: {
          name: (node.data.hardcodedValues as any).name || "Output",
          description:
            (node.data.hardcodedValues as any).description ||
            "Output from the agent",
          value: (node.data.hardcodedValues as any).value,
        },
        result: (node.data.executionResults as any)?.at(-1)?.data?.output,
      }));

      return { inputs, outputs };
    }, [nodes]);

    const handleInputChange = useCallback(
      (nodeId: string, field: string, value: string) => {
        setNodes((nds) =>
          nds.map((node) => {
            if (node.id === nodeId) {
              return {
                ...node,
                data: {
                  ...node.data,
                  hardcodedValues: {
                    ...(node.data.hardcodedValues as any),
                    [field]: value,
                  },
                },
              };
            }
            return node;
          }),
        );
      },
      [setNodes],
    );

    const openRunnerInput = () => setIsRunnerInputOpen(true);
    const openRunnerOutput = () => setIsRunnerOutputOpen(true);

    const runOrOpenInput = () => {
      const { inputs } = getBlockInputsAndOutputs();
      if (inputs.length > 0) {
        openRunnerInput();
      } else {
        requestSaveAndRun();
      }
    };

    useImperativeHandle(ref, () => ({
      openRunnerInput,
      openRunnerOutput,
      runOrOpenInput,
    }));

    return (
      <>
        <RunnerInputUI
          isOpen={isRunnerInputOpen}
          onClose={() => setIsRunnerInputOpen(false)}
          blockInputs={getBlockInputsAndOutputs().inputs}
          onInputChange={handleInputChange}
          onRun={() => {
            setIsRunnerInputOpen(false);
            requestSaveAndRun();
          }}
          isRunning={isRunning}
        />
        <RunnerOutputUI
          isOpen={isRunnerOutputOpen}
          onClose={() => setIsRunnerOutputOpen(false)}
          blockOutputs={getBlockInputsAndOutputs().outputs}
        />
      </>
    );
  },
);

RunnerUIWrapper.displayName = "RunnerUIWrapper";

export default RunnerUIWrapper;
@@ -9,6 +9,7 @@ import {
import FeaturedAgentsTable from "./FeaturedAgentsTable";
import { AdminAddFeaturedAgentDialog } from "./AdminAddFeaturedAgentDialog";
import { revalidatePath } from "next/cache";
+import * as Sentry from "@sentry/nextjs";

export default async function AdminFeaturedAgentsControl({
  className,
@@ -55,9 +56,15 @@ export default async function AdminFeaturedAgentsControl({
          component: <Button>Remove</Button>,
          action: async (rows) => {
            "use server";
-            const all = rows.map((row) => removeFeaturedAgent(row.id));
-            await Promise.all(all);
-            revalidatePath("/marketplace");
+            return await Sentry.withServerActionInstrumentation(
+              "removeFeaturedAgent",
+              {},
+              async () => {
+                const all = rows.map((row) => removeFeaturedAgent(row.id));
+                await Promise.all(all);
+                revalidatePath("/marketplace");
+              },
+            );
          },
        },
      ]}
@@ -1,17 +1,23 @@
|
||||
"use server";
|
||||
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
|
||||
import MarketplaceAPI from "@/lib/marketplace-api";
|
||||
import { revalidatePath } from "next/cache";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
|
||||
export async function approveAgent(
|
||||
agentId: string,
|
||||
version: number,
|
||||
comment: string,
|
||||
) {
|
||||
const api = new MarketplaceAPI();
|
||||
await api.approveAgentSubmission(agentId, version, comment);
|
||||
console.debug(`Approving agent ${agentId}`);
|
||||
revalidatePath("/marketplace");
|
||||
return await Sentry.withServerActionInstrumentation(
|
||||
"approveAgent",
|
||||
{},
|
||||
async () => {
|
||||
const api = new MarketplaceAPI();
|
||||
await api.approveAgentSubmission(agentId, version, comment);
|
||||
console.debug(`Approving agent ${agentId}`);
|
||||
revalidatePath("/marketplace");
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
export async function rejectAgent(
|
||||
@@ -19,67 +25,117 @@ export async function rejectAgent(
|
||||
version: number,
|
||||
comment: string,
|
||||
) {
|
||||
const api = new MarketplaceAPI();
|
||||
await api.rejectAgentSubmission(agentId, version, comment);
|
||||
  console.debug(`Rejecting agent ${agentId}`);
  revalidatePath("/marketplace");
  return await Sentry.withServerActionInstrumentation(
    "rejectAgent",
    {},
    async () => {
      const api = new MarketplaceAPI();
      await api.rejectAgentSubmission(agentId, version, comment);
      console.debug(`Rejecting agent ${agentId}`);
      revalidatePath("/marketplace");
    },
  );
}

export async function getReviewableAgents() {
  const api = new MarketplaceAPI();
  return api.getAgentSubmissions();
  return await Sentry.withServerActionInstrumentation(
    "getReviewableAgents",
    {},
    async () => {
      const api = new MarketplaceAPI();
      return api.getAgentSubmissions();
    },
  );
}

export async function getFeaturedAgents(
  page: number = 1,
  pageSize: number = 10,
) {
  const api = new MarketplaceAPI();
  const featured = await api.getFeaturedAgents(page, pageSize);
  console.debug(`Getting featured agents ${featured.agents.length}`);
  return featured;
  return await Sentry.withServerActionInstrumentation(
    "getFeaturedAgents",
    {},
    async () => {
      const api = new MarketplaceAPI();
      const featured = await api.getFeaturedAgents(page, pageSize);
      console.debug(`Getting featured agents ${featured.agents.length}`);
      return featured;
    },
  );
}

export async function getFeaturedAgent(agentId: string) {
  const api = new MarketplaceAPI();
  const featured = await api.getFeaturedAgent(agentId);
  console.debug(`Getting featured agent ${featured.agentId}`);
  return featured;
  return await Sentry.withServerActionInstrumentation(
    "getFeaturedAgent",
    {},
    async () => {
      const api = new MarketplaceAPI();
      const featured = await api.getFeaturedAgent(agentId);
      console.debug(`Getting featured agent ${featured.agentId}`);
      return featured;
    },
  );
}

export async function addFeaturedAgent(
  agentId: string,
  categories: string[] = ["featured"],
) {
  const api = new MarketplaceAPI();
  await api.addFeaturedAgent(agentId, categories);
  console.debug(`Adding featured agent ${agentId}`);
  revalidatePath("/marketplace");
  return await Sentry.withServerActionInstrumentation(
    "addFeaturedAgent",
    {},
    async () => {
      const api = new MarketplaceAPI();
      await api.addFeaturedAgent(agentId, categories);
      console.debug(`Adding featured agent ${agentId}`);
      revalidatePath("/marketplace");
    },
  );
}

export async function removeFeaturedAgent(
  agentId: string,
  categories: string[] = ["featured"],
) {
  const api = new MarketplaceAPI();
  await api.removeFeaturedAgent(agentId, categories);
  console.debug(`Removing featured agent ${agentId}`);
  revalidatePath("/marketplace");
  return await Sentry.withServerActionInstrumentation(
    "removeFeaturedAgent",
    {},
    async () => {
      const api = new MarketplaceAPI();
      await api.removeFeaturedAgent(agentId, categories);
      console.debug(`Removing featured agent ${agentId}`);
      revalidatePath("/marketplace");
    },
  );
}

export async function getCategories() {
  const api = new MarketplaceAPI();
  const categories = await api.getCategories();
  console.debug(`Getting categories ${categories.unique_categories.length}`);
  return categories;
  return await Sentry.withServerActionInstrumentation(
    "getCategories",
    {},
    async () => {
      const api = new MarketplaceAPI();
      const categories = await api.getCategories();
      console.debug(
        `Getting categories ${categories.unique_categories.length}`,
      );
      return categories;
    },
  );
}

export async function getNotFeaturedAgents(
  page: number = 1,
  pageSize: number = 100,
) {
  const api = new MarketplaceAPI();
  const agents = await api.getNotFeaturedAgents(page, pageSize);
  console.debug(`Getting not featured agents ${agents.agents.length}`);
  return agents;
  return await Sentry.withServerActionInstrumentation(
    "getNotFeaturedAgents",
    {},
    async () => {
      const api = new MarketplaceAPI();
      const agents = await api.getNotFeaturedAgents(page, pageSize);
      console.debug(`Getting not featured agents ${agents.agents.length}`);
      return agents;
    },
  );
}
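Every marketplace action above repeats the same wrapping pattern. A minimal sketch of that pattern, assuming only the public `Sentry.withServerActionInstrumentation` API from `@sentry/nextjs` and a hypothetical `doWork` helper standing in for any `MarketplaceAPI` call:

"use server";

import * as Sentry from "@sentry/nextjs";

// Hypothetical stand-in for a MarketplaceAPI call.
async function doWork(): Promise<string> {
  return "done";
}

// The body runs inside Sentry's server-action instrumentation, so errors
// and timing are reported under the given action name.
export async function exampleAction(): Promise<string> {
  return await Sentry.withServerActionInstrumentation(
    "exampleAction",
    {},
    async () => doWork(),
  );
}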
@@ -1,6 +1,5 @@
.custom-node {
  color: #000000;
  width: 500px;
  box-sizing: border-box;
  transition: border-color 0.3s ease-in-out;
}
@@ -19,6 +19,7 @@ import React from "react";
export type Control = {
  icon: React.ReactNode;
  label: string;
  disabled?: boolean;
  onClick: () => void;
};
@@ -50,15 +51,18 @@ export const ControlPanel = ({
{controls.map((control, index) => (
  <Tooltip key={index} delayDuration={500}>
    <TooltipTrigger asChild>
      <Button
        variant="ghost"
        size="icon"
        onClick={() => control.onClick()}
        data-id={`control-button-${index}`}
      >
        {control.icon}
        <span className="sr-only">{control.label}</span>
      </Button>
      <div>
        <Button
          variant="ghost"
          size="icon"
          onClick={() => control.onClick()}
          data-id={`control-button-${index}`}
          disabled={control.disabled || false}
        >
          {control.icon}
          <span className="sr-only">{control.label}</span>
        </Button>
      </div>
    </TooltipTrigger>
    <TooltipContent side="right">{control.label}</TooltipContent>
  </Tooltip>
@@ -18,6 +18,8 @@ import {

interface SaveControlProps {
  agentMeta: GraphMeta | null;
  agentName: string;
  agentDescription: string;
  onSave: (isTemplate: boolean | undefined) => void;
  onNameChange: (name: string) => void;
  onDescriptionChange: (description: string) => void;
@@ -35,7 +37,9 @@ interface SaveControlProps {
export const SaveControl = ({
  agentMeta,
  onSave,
  agentName,
  onNameChange,
  agentDescription,
  onDescriptionChange,
}: SaveControlProps) => {
  /**
@@ -75,7 +79,7 @@ export const SaveControl = ({
  id="name"
  placeholder="Enter your agent name"
  className="col-span-3"
  defaultValue={agentMeta?.name || ""}
  value={agentName}
  onChange={(e) => onNameChange(e.target.value)}
/>
<Label htmlFor="description">Description</Label>
@@ -83,9 +87,21 @@ export const SaveControl = ({
  id="description"
  placeholder="Your agent description"
  className="col-span-3"
  defaultValue={agentMeta?.description || ""}
  value={agentDescription}
  onChange={(e) => onDescriptionChange(e.target.value)}
/>
{agentMeta?.version && (
  <>
    <Label htmlFor="version">Version</Label>
    <Input
      id="version"
      placeholder="Version"
      className="col-span-3"
      value={agentMeta?.version || "-"}
      disabled
    />
  </>
)}
</div>
</CardContent>
<CardFooter className="flex flex-col items-stretch gap-2">
@@ -81,7 +81,7 @@ function convertGraphToReactFlow(graph: any): { nodes: Node[]; edges: Edge[] } {
async function installGraph(id: string): Promise<void> {
  const apiUrl =
    process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
    "http://localhost:8001/api/v1/market";
    "http://localhost:8015/api/v1/market";
  const api = new MarketplaceAPI(apiUrl);

  const serverAPIUrl = process.env.AGPT_SERVER_API_URL;
@@ -1,9 +1,16 @@
"use server";

import * as Sentry from "@sentry/nextjs";
import MarketplaceAPI, { AnalyticsEvent } from "@/lib/marketplace-api";

export async function makeAnalyticsEvent(event: AnalyticsEvent) {
  const apiUrl = process.env.AGPT_SERVER_API_URL;
  const api = new MarketplaceAPI();
  await api.makeAnalyticsEvent(event);
  return await Sentry.withServerActionInstrumentation(
    "makeAnalyticsEvent",
    {},
    async () => {
      const apiUrl = process.env.AGPT_SERVER_API_URL;
      const api = new MarketplaceAPI();
      await api.makeAnalyticsEvent(event);
    },
  );
}
@@ -10,7 +10,7 @@ import {
  BlockIONumberSubSchema,
  BlockIOBooleanSubSchema,
} from "@/lib/autogpt-server-api/types";
import { FC, useCallback, useEffect, useState } from "react";
import React, { FC, useCallback, useEffect, useState } from "react";
import { Button } from "./ui/button";
import { Switch } from "./ui/switch";
import {
@@ -380,7 +380,7 @@ const NodeKeyValueInput: FC<{
<Input
  type="text"
  placeholder="Value"
  value={value ?? ""}
  defaultValue={value ?? ""}
  onBlur={(e) =>
    updateKeyValuePairs(
      keyValuePairs.toSpliced(index, 1, {
@@ -563,7 +563,7 @@ const NodeStringInput: FC<{
<Input
  type="text"
  id={selfKey}
  value={schema.secret && value ? "********" : value}
  defaultValue={schema.secret && value ? "********" : value}
  readOnly={schema.secret}
  placeholder={
    schema?.placeholder || `Enter ${beautifyString(displayName)}`
@@ -587,6 +587,52 @@ const NodeStringInput: FC<{
  );
};

export const NodeTextBoxInput: FC<{
  selfKey: string;
  schema: BlockIOStringSubSchema;
  value?: string;
  error?: string;
  handleInputChange: NodeObjectInputTreeProps["handleInputChange"];
  handleInputClick: NodeObjectInputTreeProps["handleInputClick"];
  className?: string;
  displayName: string;
}> = ({
  selfKey,
  schema,
  value = "",
  error,
  handleInputChange,
  handleInputClick,
  className,
  displayName,
}) => {
  return (
    <div className={className}>
      <div
        className="nodrag relative m-0 h-[200px] w-full bg-yellow-100 p-4"
        onClick={schema.secret ? () => handleInputClick(selfKey) : undefined}
      >
        <textarea
          id={selfKey}
          value={schema.secret && value ? "********" : value}
          readOnly={schema.secret}
          placeholder={
            schema?.placeholder || `Enter ${beautifyString(displayName)}`
          }
          onChange={(e) => handleInputChange(selfKey, e.target.value)}
          onBlur={(e) => handleInputChange(selfKey, e.target.value)}
          className="h-full w-full resize-none overflow-hidden border-none bg-transparent text-lg text-black outline-none"
          style={{
            fontSize: "min(1em, 16px)",
            lineHeight: "1.2",
          }}
        />
      </div>
      {error && <span className="error-message">{error}</span>}
    </div>
  );
};

const NodeNumberInput: FC<{
  selfKey: string;
  schema: BlockIONumberSubSchema;
@@ -612,7 +658,7 @@ const NodeNumberInput: FC<{
<Input
  type="number"
  id={selfKey}
  value={value}
  defaultValue={value}
  onBlur={(e) => handleInputChange(selfKey, parseFloat(e.target.value))}
  placeholder={
    schema.placeholder || `Enter ${beautifyString(displayName)}`
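`NodeKeyValueInput` above relies on `Array.prototype.toSpliced`, which returns a modified copy instead of mutating in place. A minimal sketch of its semantics, assuming an ES2023 runtime:

const pairs = [
  { key: "a", value: "1" },
  { key: "b", value: "2" },
];

// Replace the element at index 1 without mutating `pairs`.
const updated = pairs.toSpliced(1, 1, { key: "b", value: "3" });

console.log(pairs[1].value);   // "2" - the original array is untouched
console.log(updated[1].value); // "3"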
@@ -0,0 +1,61 @@
import React from "react";
import { Input } from "@/components/ui/input";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/ui/select";

interface InputBlockProps {
  id: string;
  name: string;
  description?: string;
  value: string;
  placeholder_values?: any[];
  onInputChange: (id: string, field: string, value: string) => void;
}

export function InputBlock({
  id,
  name,
  description,
  value,
  placeholder_values,
  onInputChange,
}: InputBlockProps) {
  return (
    <div className="space-y-1">
      <h3 className="text-base font-semibold">{name || "Unnamed Input"}</h3>
      {description && <p className="text-sm text-gray-600">{description}</p>}
      <div>
        {placeholder_values && placeholder_values.length > 1 ? (
          <Select
            onValueChange={(value) => onInputChange(id, "value", value)}
            value={value}
          >
            <SelectTrigger className="w-full">
              <SelectValue placeholder="Select a value" />
            </SelectTrigger>
            <SelectContent>
              {placeholder_values.map((placeholder, index) => (
                <SelectItem key={index} value={placeholder.toString()}>
                  {placeholder.toString()}
                </SelectItem>
              ))}
            </SelectContent>
          </Select>
        ) : (
          <Input
            id={`${id}-Value`}
            value={value}
            onChange={(e) => onInputChange(id, "value", e.target.value)}
            placeholder={placeholder_values?.[0]?.toString() || "Enter value"}
            className="w-full"
          />
        )}
      </div>
    </div>
  );
}
@@ -0,0 +1,33 @@
import React from "react";
import { ScrollArea } from "@/components/ui/scroll-area";
import { InputBlock } from "./RunnerInputBlock";
import { BlockInput } from "./RunnerInputUI";

interface InputListProps {
  blockInputs: BlockInput[];
  onInputChange: (nodeId: string, field: string, value: string) => void;
}

export function InputList({ blockInputs, onInputChange }: InputListProps) {
  return (
    <ScrollArea className="h-[20vh] overflow-auto pr-4 sm:h-[30vh] md:h-[40vh] lg:h-[50vh]">
      <div className="space-y-4">
        {blockInputs && blockInputs.length > 0 ? (
          blockInputs.map((block) => (
            <InputBlock
              key={block.id}
              id={block.id}
              name={block.hardcodedValues.name}
              description={block.hardcodedValues.description}
              value={block.hardcodedValues.value?.toString() || ""}
              placeholder_values={block.hardcodedValues.placeholder_values}
              onInputChange={onInputChange}
            />
          ))
        ) : (
          <p>No input blocks available.</p>
        )}
      </div>
    </ScrollArea>
  );
}
@@ -0,0 +1,74 @@
import React from "react";
import {
  Dialog,
  DialogContent,
  DialogHeader,
  DialogTitle,
  DialogDescription,
  DialogFooter,
} from "@/components/ui/dialog";
import { Button } from "@/components/ui/button";
import { BlockIORootSchema } from "@/lib/autogpt-server-api/types";
import { InputList } from "./RunnerInputList";

export interface BlockInput {
  id: string;
  inputSchema: BlockIORootSchema;
  hardcodedValues: {
    name: string;
    description: string;
    value: any;
    placeholder_values?: any[];
    limit_to_placeholder_values?: boolean;
  };
}

interface RunSettingsUiProps {
  isOpen: boolean;
  onClose: () => void;
  blockInputs: BlockInput[];
  onInputChange: (nodeId: string, field: string, value: string) => void;
  onRun: () => void;
  isRunning: boolean;
}

export function RunnerInputUI({
  isOpen,
  onClose,
  blockInputs,
  onInputChange,
  onRun,
  isRunning,
}: RunSettingsUiProps) {
  const handleRun = () => {
    onRun();
    onClose();
  };

  return (
    <Dialog open={isOpen} onOpenChange={onClose}>
      <DialogContent className="flex max-h-[80vh] flex-col overflow-hidden sm:max-w-[400px] md:max-w-[500px] lg:max-w-[600px]">
        <DialogHeader className="px-4 py-4">
          <DialogTitle className="text-2xl">Run Settings</DialogTitle>
          <DialogDescription className="mt-2 text-sm">
            Configure settings for running your agent.
          </DialogDescription>
        </DialogHeader>
        <div className="flex-grow overflow-y-auto px-4 py-4">
          <InputList blockInputs={blockInputs} onInputChange={onInputChange} />
        </div>
        <DialogFooter className="px-6 py-4">
          <Button
            onClick={handleRun}
            className="px-8 py-2 text-lg"
            disabled={isRunning}
          >
            {isRunning ? "Running..." : "Run"}
          </Button>
        </DialogFooter>
      </DialogContent>
    </Dialog>
  );
}

export default RunnerInputUI;
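A minimal usage sketch for the dialog above; the `sampleInputs` data and the empty `inputSchema` cast are placeholders for illustration, not real graph data, and the imports from the file above are assumed to be in scope:

const sampleInputs: BlockInput[] = [
  {
    id: "node-1",
    inputSchema: {} as BlockIORootSchema, // placeholder schema
    hardcodedValues: {
      name: "City",
      description: "Which city to search for",
      value: "Berlin",
      placeholder_values: ["Berlin", "Paris"],
    },
  },
];

function ExampleRunDialog() {
  const [open, setOpen] = React.useState(true);
  return (
    <RunnerInputUI
      isOpen={open}
      onClose={() => setOpen(false)}
      blockInputs={sampleInputs}
      onInputChange={(nodeId, field, value) =>
        console.debug(nodeId, field, value)
      }
      onRun={() => console.debug("run requested")}
      isRunning={false}
    />
  );
}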
@@ -0,0 +1,94 @@
import React from "react";
import {
  Sheet,
  SheetContent,
  SheetHeader,
  SheetTitle,
  SheetDescription,
} from "@/components/ui/sheet";
import { ScrollArea } from "@/components/ui/scroll-area";
import { BlockIORootSchema } from "@/lib/autogpt-server-api/types";
import { Label } from "@/components/ui/label";
import { Textarea } from "@/components/ui/textarea";

interface BlockOutput {
  id: string;
  outputSchema: BlockIORootSchema;
  hardcodedValues: {
    name: string;
    description: string;
  };
  result?: any;
}

interface OutputModalProps {
  isOpen: boolean;
  onClose: () => void;
  blockOutputs: BlockOutput[];
}

const formatOutput = (output: any): string => {
  if (typeof output === "object") {
    try {
      return JSON.stringify(output, null, 2);
    } catch (error) {
      return `Error formatting output: ${(error as Error).message}`;
    }
  }
  return String(output);
};

export function RunnerOutputUI({
  isOpen,
  onClose,
  blockOutputs,
}: OutputModalProps) {
  return (
    <Sheet open={isOpen} onOpenChange={onClose}>
      <SheetContent
        side="right"
        className="flex h-full w-full flex-col overflow-hidden sm:max-w-[500px]"
      >
        <SheetHeader className="px-2 py-2">
          <SheetTitle className="text-xl">Run Outputs</SheetTitle>
          <SheetDescription className="mt-1 text-sm">
            View the outputs from your agent run.
          </SheetDescription>
        </SheetHeader>
        <div className="flex-grow overflow-y-auto px-2 py-2">
          <ScrollArea className="h-full overflow-auto pr-4">
            <div className="space-y-4">
              {blockOutputs && blockOutputs.length > 0 ? (
                blockOutputs.map((block) => (
                  <div key={block.id} className="space-y-1">
                    <Label className="text-base font-semibold">
                      {block.hardcodedValues.name || "Unnamed Output"}
                    </Label>

                    {block.hardcodedValues.description && (
                      <Label className="block text-sm text-gray-600">
                        {block.hardcodedValues.description}
                      </Label>
                    )}

                    <div className="rounded-md bg-gray-100 p-2">
                      <Textarea
                        readOnly
                        value={formatOutput(block.result ?? "No output yet")}
                        className="resize-none whitespace-pre-wrap break-words border-none bg-transparent text-sm"
                      />
                    </div>
                  </div>
                ))
              ) : (
                <p>No output blocks available.</p>
              )}
            </div>
          </ScrollArea>
        </div>
      </SheetContent>
    </Sheet>
  );
}

export default RunnerOutputUI;
@@ -1,3 +1,4 @@
import { sendGAEvent } from "@next/third-parties/google";
import Shepherd from "shepherd.js";
import "shepherd.js/dist/css/shepherd.css";

@@ -493,6 +494,15 @@ export const startTutorial = (
    localStorage.setItem("shepherd-tour", "completed"); // Optionally mark the tutorial as completed
  });

  for (const step of tour.steps) {
    step.on("show", () => {
      "use client";
      console.debug("sendTutorialStep");

      sendGAEvent("event", "tutorial_step_shown", { value: step.id });
    });
  }

  tour.on("cancel", () => {
    setPinBlocksPopover(false);
    localStorage.setItem("shepherd-tour", "canceled"); // Optionally mark the tutorial as canceled
@@ -264,6 +264,43 @@ export const IconCircleUser = createIcon((props) => (
  </svg>
));

/**
 * Refresh icon component.
 *
 * @component IconRefresh
 * @param {IconProps} props - The props object containing additional attributes and event handlers for the icon.
 * @returns {JSX.Element} - The refresh icon.
 *
 * @example
 * // Default usage; this is the standard usage
 * <IconRefresh />
 *
 * @example
 * // With custom color and size; these should be used sparingly and only when necessary
 * <IconRefresh className="text-primary" size="lg" />
 *
 * @example
 * // With custom size and onClick handler
 * <IconRefresh size="sm" onClick={handleOnClick} />
 */
export const IconRefresh = createIcon((props) => (
  <svg
    xmlns="http://www.w3.org/2000/svg"
    viewBox="0 0 24 24"
    fill="none"
    stroke="currentColor"
    strokeWidth="2"
    strokeLinecap="round"
    strokeLinejoin="round"
    {...props}
  >
    <polyline points="23 4 23 10 17 10" />
    <polyline points="1 20 1 14 7 14" />
    <path d="M3.51 9a9 9 0 0 1 14.136 -5.36L23 10" />
    <path d="M20.49 15a9 9 0 0 1 -14.136 5.36L1 14" />
  </svg>
));

/**
 * Menu icon component.
 *
@@ -6,20 +6,7 @@ export interface InputProps
  extends React.InputHTMLAttributes<HTMLInputElement> {}

const Input = React.forwardRef<HTMLInputElement, InputProps>(
  ({ className, type, value, ...props }, ref) => {
    // This ref allows the `Input` component to be both controlled and uncontrolled.
    // The HTML value will only be updated if the value prop changes, but the user can still type in the input.
    ref = ref || React.createRef<HTMLInputElement>();
    React.useEffect(() => {
      if (
        ref &&
        ref.current &&
        ref.current.value !== value &&
        type !== "file"
      ) {
        ref.current.value = value;
      }
    }, [value, type, ref]);
  ({ className, type, ...props }, ref) => {
    return (
      <input
        type={type}
@@ -29,7 +16,6 @@ const Input = React.forwardRef<HTMLInputElement, InputProps>(
          className,
        )}
        ref={ref}
        defaultValue={type !== "file" ? value : undefined}
        {...props}
      />
    );
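The change above removes a ref-syncing workaround and falls back on standard React input semantics. The difference, in a minimal sketch:

import React, { useState } from "react";

// Controlled: React state is the single source of truth for the value.
function ControlledExample() {
  const [name, setName] = useState("");
  return <input value={name} onChange={(e) => setName(e.target.value)} />;
}

// Uncontrolled: the DOM owns the value; read it when needed (e.g. on blur).
function UncontrolledExample() {
  return (
    <input
      defaultValue="initial"
      onBlur={(e) => console.debug(e.target.value)}
    />
  );
}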
@@ -16,6 +16,7 @@ import {
import { Connection, MarkerType } from "@xyflow/react";
import Ajv from "ajv";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useRouter, useSearchParams, usePathname } from "next/navigation";

const ajv = new Ajv({ strict: false, allErrors: true });

@@ -24,6 +25,11 @@ export default function useAgentGraph(
  template?: boolean,
  passDataToBeads?: boolean,
) {
  const [router, searchParams, pathname] = [
    useRouter(),
    useSearchParams(),
    usePathname(),
  ];
  const [savedAgent, setSavedAgent] = useState<Graph | null>(null);
  const [agentDescription, setAgentDescription] = useState<string>("");
  const [agentName, setAgentName] = useState<string>("");
@@ -133,12 +139,13 @@ export default function useAgentGraph(
  id: node.id,
  type: "custom",
  position: {
    x: node.metadata.position.x,
    y: node.metadata.position.y,
    x: node?.metadata?.position?.x || 0,
    y: node?.metadata?.position?.y || 0,
  },
  data: {
    block_id: block.id,
    blockType: block.name,
    blockCosts: block.costs,
    categories: block.categories,
    description: block.description,
    title: `${block.name} ${node.id}`,
@@ -307,7 +314,7 @@ export default function useAgentGraph(

  (template ? api.getTemplate(flowID) : api.getGraph(flowID)).then(
    (graph) => {
      console.log("Loading graph");
      console.debug("Loading graph");
      loadGraph(graph);
    },
  );
@@ -638,31 +645,59 @@ export default function useAgentGraph(
      links: links,
    };

    if (savedAgent && deepEquals(payload, savedAgent)) {
      console.debug(
        "No need to save: Graph is the same as version on server",
      );
      // Trigger state change
      setSavedAgent(savedAgent);
      return;
    // To avoid saving the same graph, we compare the payload with the saved agent.
    // Differences in IDs are ignored.
    const comparedPayload = {
      ...(({ id, ...rest }) => rest)(payload),
      nodes: payload.nodes.map(
        ({ id, data, input_nodes, output_nodes, ...rest }) => rest,
      ),
      links: payload.links.map(({ source_id, sink_id, ...rest }) => rest),
    };
    const comparedSavedAgent = {
      name: savedAgent?.name,
      description: savedAgent?.description,
      nodes: savedAgent?.nodes.map((v) => ({
        block_id: v.block_id,
        input_default: v.input_default,
        metadata: v.metadata,
      })),
      links: savedAgent?.links.map((v) => ({
        sink_name: v.sink_name,
        source_name: v.source_name,
      })),
    };

    let newSavedAgent = null;
    if (savedAgent && deepEquals(comparedPayload, comparedSavedAgent)) {
      console.warn("No need to save: Graph is the same as version on server");
      newSavedAgent = savedAgent;
    } else {
      console.debug(
        "Saving new Graph version; old vs new:",
        savedAgent,
        comparedPayload,
        payload,
      );
      setNodesSyncedWithSavedAgent(false);

      newSavedAgent = savedAgent
        ? await (savedAgent.is_template
            ? api.updateTemplate(savedAgent.id, payload)
            : api.updateGraph(savedAgent.id, payload))
        : await (asTemplate
            ? api.createTemplate(payload)
            : api.createGraph(payload));

      console.debug("Response from the API:", newSavedAgent);
    }

    setNodesSyncedWithSavedAgent(false);

    const newSavedAgent = savedAgent
      ? await (savedAgent.is_template
          ? api.updateTemplate(savedAgent.id, payload)
          : api.updateGraph(savedAgent.id, payload))
      : await (asTemplate
          ? api.createTemplate(payload)
          : api.createGraph(payload));
    console.debug("Response from the API:", newSavedAgent);
    // Route the URL to the new flow ID if it's a new agent.
    if (!savedAgent) {
      const path = new URLSearchParams(searchParams);
      path.set("flowID", newSavedAgent.id);
      router.push(`${pathname}?${path.toString()}`);
      return;
    }

    // Update the node IDs on the frontend
    setSavedAgent(newSavedAgent);
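The `comparedPayload` built above strips volatile fields with an immediately-invoked destructuring arrow function. A minimal sketch of that idiom:

const payload = { id: "graph-123", name: "My agent", description: "..." };

// Destructure `id` out and keep the rest, without mutating `payload`.
const withoutId = (({ id, ...rest }) => rest)(payload);

console.log(withoutId); // { name: "My agent", description: "..." }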
13
rnd/autogpt_builder/src/instrumentation.ts
Normal file
@@ -0,0 +1,13 @@
import * as Sentry from "@sentry/nextjs";

export async function register() {
  if (process.env.NEXT_RUNTIME === "nodejs") {
    await import("../sentry.server.config");
  }

  if (process.env.NEXT_RUNTIME === "edge") {
    await import("../sentry.edge.config");
  }
}

export const onRequestError = Sentry.captureRequestError;
321
rnd/autogpt_builder/src/lib/autogpt-server-api/baseClient.ts
Normal file
@@ -0,0 +1,321 @@
import { SupabaseClient } from "@supabase/supabase-js";
import {
  Block,
  Graph,
  GraphCreatable,
  GraphUpdateable,
  GraphMeta,
  GraphExecuteResponse,
  NodeExecutionResult,
  User,
  AnalyticsMetrics,
  AnalyticsDetails,
} from "./types";

export default class BaseAutoGPTServerAPI {
  private baseUrl: string;
  private wsUrl: string;
  private webSocket: WebSocket | null = null;
  private wsConnecting: Promise<void> | null = null;
  private wsMessageHandlers: Record<string, Set<(data: any) => void>> = {};
  private supabaseClient: SupabaseClient | null = null;

  constructor(
    baseUrl: string = process.env.NEXT_PUBLIC_AGPT_SERVER_URL ||
      "http://localhost:8006/api",
    wsUrl: string = process.env.NEXT_PUBLIC_AGPT_WS_SERVER_URL ||
      "ws://localhost:8001/ws",
    supabaseClient: SupabaseClient | null = null,
  ) {
    this.baseUrl = baseUrl;
    this.wsUrl = wsUrl;
    this.supabaseClient = supabaseClient;
  }

  async createUser(): Promise<User> {
    return this._request("POST", "/auth/user", {});
  }

  async getUserCredit(): Promise<{ credits: number }> {
    return this._get(`/credits`);
  }

  async getBlocks(): Promise<Block[]> {
    return await this._get("/blocks");
  }

  async listGraphs(): Promise<GraphMeta[]> {
    return this._get("/graphs");
  }

  async listTemplates(): Promise<GraphMeta[]> {
    return this._get("/templates");
  }

  async getGraph(id: string, version?: number): Promise<Graph> {
    const query = version !== undefined ? `?version=${version}` : "";
    return this._get(`/graphs/${id}` + query);
  }

  async getTemplate(id: string, version?: number): Promise<Graph> {
    const query = version !== undefined ? `?version=${version}` : "";
    return this._get(`/templates/${id}` + query);
  }

  async getGraphAllVersions(id: string): Promise<Graph[]> {
    return this._get(`/graphs/${id}/versions`);
  }

  async getTemplateAllVersions(id: string): Promise<Graph[]> {
    return this._get(`/templates/${id}/versions`);
  }

  async createGraph(graphCreateBody: GraphCreatable): Promise<Graph>;
  async createGraph(
    fromTemplateID: string,
    templateVersion: number,
  ): Promise<Graph>;
  async createGraph(
    graphOrTemplateID: GraphCreatable | string,
    templateVersion?: number,
  ): Promise<Graph> {
    let requestBody: GraphCreateRequestBody;

    if (typeof graphOrTemplateID == "string") {
      if (templateVersion == undefined) {
        throw new Error("templateVersion not specified");
      }
      requestBody = {
        template_id: graphOrTemplateID,
        template_version: templateVersion,
      };
    } else {
      requestBody = { graph: graphOrTemplateID };
    }

    return this._request("POST", "/graphs", requestBody);
  }

  async createTemplate(templateCreateBody: GraphCreatable): Promise<Graph> {
    const requestBody: GraphCreateRequestBody = { graph: templateCreateBody };
    return this._request("POST", "/templates", requestBody);
  }

  async updateGraph(id: string, graph: GraphUpdateable): Promise<Graph> {
    return await this._request("PUT", `/graphs/${id}`, graph);
  }

  async updateTemplate(id: string, template: GraphUpdateable): Promise<Graph> {
    return await this._request("PUT", `/templates/${id}`, template);
  }

  async setGraphActiveVersion(id: string, version: number): Promise<Graph> {
    return this._request("PUT", `/graphs/${id}/versions/active`, {
      active_graph_version: version,
    });
  }

  async executeGraph(
    id: string,
    inputData: { [key: string]: any } = {},
  ): Promise<GraphExecuteResponse> {
    return this._request("POST", `/graphs/${id}/execute`, inputData);
  }

  async listGraphRunIDs(
    graphID: string,
    graphVersion?: number,
  ): Promise<string[]> {
    const query =
      graphVersion !== undefined ? `?graph_version=${graphVersion}` : "";
    return this._get(`/graphs/${graphID}/executions` + query);
  }

  async getGraphExecutionInfo(
    graphID: string,
    runID: string,
  ): Promise<NodeExecutionResult[]> {
    return (await this._get(`/graphs/${graphID}/executions/${runID}`)).map(
      parseNodeExecutionResultTimestamps,
    );
  }

  async stopGraphExecution(
    graphID: string,
    runID: string,
  ): Promise<NodeExecutionResult[]> {
    return (
      await this._request("POST", `/graphs/${graphID}/executions/${runID}/stop`)
    ).map(parseNodeExecutionResultTimestamps);
  }

  async logMetric(metric: AnalyticsMetrics) {
    return this._request("POST", "/analytics/log_raw_metric", metric);
  }

  async logAnalytic(analytic: AnalyticsDetails) {
    return this._request("POST", "/analytics/log_raw_analytics", analytic);
  }

  private async _get(path: string) {
    return this._request("GET", path);
  }

  private async _request(
    method: "GET" | "POST" | "PUT" | "PATCH",
    path: string,
    payload?: { [key: string]: any },
  ) {
    if (method != "GET") {
      console.debug(`${method} ${path} payload:`, payload);
    }

    const token =
      (await this.supabaseClient?.auth.getSession())?.data.session
        ?.access_token || "";

    const response = await fetch(this.baseUrl + path, {
      method,
      headers:
        method != "GET"
          ? {
              "Content-Type": "application/json",
              Authorization: token ? `Bearer ${token}` : "",
            }
          : {
              Authorization: token ? `Bearer ${token}` : "",
            },
      body: JSON.stringify(payload),
    });
    const response_data = await response.json();

    if (!response.ok) {
      console.warn(
        `${method} ${path} returned non-OK response:`,
        response_data.detail,
        response,
      );
      throw new Error(`HTTP error ${response.status}! ${response_data.detail}`);
    }
    return response_data;
  }

  async connectWebSocket(): Promise<void> {
    this.wsConnecting ??= new Promise(async (resolve, reject) => {
      try {
        const token =
          (await this.supabaseClient?.auth.getSession())?.data.session
            ?.access_token || "";

        const wsUrlWithToken = `${this.wsUrl}?token=${token}`;
        this.webSocket = new WebSocket(wsUrlWithToken);

        this.webSocket.onopen = () => {
          console.debug("WebSocket connection established");
          resolve();
        };

        this.webSocket.onclose = (event) => {
          console.debug("WebSocket connection closed", event);
          this.webSocket = null;
        };

        this.webSocket.onerror = (error) => {
          console.error("WebSocket error:", error);
          reject(error);
        };

        this.webSocket.onmessage = (event) => {
          const message: WebsocketMessage = JSON.parse(event.data);
          if (message.method == "execution_event") {
            message.data = parseNodeExecutionResultTimestamps(message.data);
          }
          this.wsMessageHandlers[message.method]?.forEach((handler) =>
            handler(message.data),
          );
        };
      } catch (error) {
        console.error("Error connecting to WebSocket:", error);
        reject(error);
      }
    });
    return this.wsConnecting;
  }

  disconnectWebSocket() {
    if (this.webSocket && this.webSocket.readyState === WebSocket.OPEN) {
      this.webSocket.close();
    }
  }

  sendWebSocketMessage<M extends keyof WebsocketMessageTypeMap>(
    method: M,
    data: WebsocketMessageTypeMap[M],
    callCount = 0,
  ) {
    if (this.webSocket && this.webSocket.readyState === WebSocket.OPEN) {
      this.webSocket.send(JSON.stringify({ method, data }));
    } else {
      this.connectWebSocket().then(() => {
        callCount == 0
          ? this.sendWebSocketMessage(method, data, callCount + 1)
          : setTimeout(
              () => {
                this.sendWebSocketMessage(method, data, callCount + 1);
              },
              2 ** (callCount - 1) * 1000,
            );
      });
    }
  }

  onWebSocketMessage<M extends keyof WebsocketMessageTypeMap>(
    method: M,
    handler: (data: WebsocketMessageTypeMap[M]) => void,
  ): () => void {
    this.wsMessageHandlers[method] ??= new Set();
    this.wsMessageHandlers[method].add(handler);

    // Return detacher
    return () => this.wsMessageHandlers[method].delete(handler);
  }

  subscribeToExecution(graphId: string) {
    this.sendWebSocketMessage("subscribe", { graph_id: graphId });
  }
}

/* *** UTILITY TYPES *** */

type GraphCreateRequestBody =
  | {
      template_id: string;
      template_version: number;
    }
  | {
      graph: GraphCreatable;
    };

type WebsocketMessageTypeMap = {
  subscribe: { graph_id: string };
  execution_event: NodeExecutionResult;
};

type WebsocketMessage = {
  [M in keyof WebsocketMessageTypeMap]: {
    method: M;
    data: WebsocketMessageTypeMap[M];
  };
}[keyof WebsocketMessageTypeMap];

/* *** HELPER FUNCTIONS *** */

function parseNodeExecutionResultTimestamps(result: any): NodeExecutionResult {
  return {
    ...result,
    add_time: new Date(result.add_time),
    queue_time: result.queue_time ? new Date(result.queue_time) : undefined,
    start_time: result.start_time ? new Date(result.start_time) : undefined,
    end_time: result.end_time ? new Date(result.end_time) : undefined,
  };
}
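`sendWebSocketMessage` above retries after reconnecting: a failed send reconnects and resends immediately, and each subsequent retry waits 2^(callCount - 1) seconds. A quick check of the delay formula:

// callCount 0 resends immediately; for callCount >= 1 the delay doubles:
const retryDelayMs = (callCount: number) => 2 ** (callCount - 1) * 1000;

console.log([1, 2, 3, 4].map(retryDelayMs)); // [1000, 2000, 4000, 8000]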
@@ -1,305 +1,14 @@
import { createClient } from "../supabase/client";
import {
  Block,
  Graph,
  GraphCreatable,
  GraphUpdateable,
  GraphMeta,
  GraphExecuteResponse,
  NodeExecutionResult,
  User,
} from "./types";

export default class AutoGPTServerAPI {
  private baseUrl: string;
  private wsUrl: string;
  private webSocket: WebSocket | null = null;
  private wsConnecting: Promise<void> | null = null;
  private wsMessageHandlers: Record<string, Set<(data: any) => void>> = {};
  private supabaseClient = createClient();
import BaseAutoGPTServerAPI from "./baseClient";

export default class AutoGPTServerAPI extends BaseAutoGPTServerAPI {
  constructor(
    baseUrl: string = process.env.NEXT_PUBLIC_AGPT_SERVER_URL ||
      "http://localhost:8000/api",
      "http://localhost:8006/api",
    wsUrl: string = process.env.NEXT_PUBLIC_AGPT_WS_SERVER_URL ||
      "ws://localhost:8001/ws",
  ) {
    this.baseUrl = baseUrl;
    this.wsUrl = wsUrl;
  }

  async createUser(): Promise<User> {
    return this._request("POST", "/auth/user", {});
  }

  async getBlocks(): Promise<Block[]> {
    return await this._get("/blocks");
  }

  async listGraphs(): Promise<GraphMeta[]> {
    return this._get("/graphs");
  }

  async listTemplates(): Promise<GraphMeta[]> {
    return this._get("/templates");
  }

  async getGraph(id: string, version?: number): Promise<Graph> {
    const query = version !== undefined ? `?version=${version}` : "";
    return this._get(`/graphs/${id}` + query);
  }

  async getTemplate(id: string, version?: number): Promise<Graph> {
    const query = version !== undefined ? `?version=${version}` : "";
    return this._get(`/templates/${id}` + query);
  }

  async getGraphAllVersions(id: string): Promise<Graph[]> {
    return this._get(`/graphs/${id}/versions`);
  }

  async getTemplateAllVersions(id: string): Promise<Graph[]> {
    return this._get(`/templates/${id}/versions`);
  }

  async createGraph(graphCreateBody: GraphCreatable): Promise<Graph>;
  async createGraph(
    fromTemplateID: string,
    templateVersion: number,
  ): Promise<Graph>;
  async createGraph(
    graphOrTemplateID: GraphCreatable | string,
    templateVersion?: number,
  ): Promise<Graph> {
    let requestBody: GraphCreateRequestBody;

    if (typeof graphOrTemplateID == "string") {
      if (templateVersion == undefined) {
        throw new Error("templateVersion not specified");
      }
      requestBody = {
        template_id: graphOrTemplateID,
        template_version: templateVersion,
      };
    } else {
      requestBody = { graph: graphOrTemplateID };
    }

    return this._request("POST", "/graphs", requestBody);
  }

  async createTemplate(templateCreateBody: GraphCreatable): Promise<Graph> {
    const requestBody: GraphCreateRequestBody = { graph: templateCreateBody };
    return this._request("POST", "/templates", requestBody);
  }

  async updateGraph(id: string, graph: GraphUpdateable): Promise<Graph> {
    return await this._request("PUT", `/graphs/${id}`, graph);
  }

  async updateTemplate(id: string, template: GraphUpdateable): Promise<Graph> {
    return await this._request("PUT", `/templates/${id}`, template);
  }

  async setGraphActiveVersion(id: string, version: number): Promise<Graph> {
    return this._request("PUT", `/graphs/${id}/versions/active`, {
      active_graph_version: version,
    });
  }

  async executeGraph(
    id: string,
    inputData: { [key: string]: any } = {},
  ): Promise<GraphExecuteResponse> {
    return this._request("POST", `/graphs/${id}/execute`, inputData);
  }

  async listGraphRunIDs(
    graphID: string,
    graphVersion?: number,
  ): Promise<string[]> {
    const query =
      graphVersion !== undefined ? `?graph_version=${graphVersion}` : "";
    return this._get(`/graphs/${graphID}/executions` + query);
  }

  async getGraphExecutionInfo(
    graphID: string,
    runID: string,
  ): Promise<NodeExecutionResult[]> {
    return (await this._get(`/graphs/${graphID}/executions/${runID}`)).map(
      parseNodeExecutionResultTimestamps,
    );
  }

  async stopGraphExecution(
    graphID: string,
    runID: string,
  ): Promise<NodeExecutionResult[]> {
    return (
      await this._request("POST", `/graphs/${graphID}/executions/${runID}/stop`)
    ).map(parseNodeExecutionResultTimestamps);
  }

  private async _get(path: string) {
    return this._request("GET", path);
  }

  private async _request(
    method: "GET" | "POST" | "PUT" | "PATCH",
    path: string,
    payload?: { [key: string]: any },
  ) {
    if (method != "GET") {
      console.debug(`${method} ${path} payload:`, payload);
    }

    const token =
      (await this.supabaseClient?.auth.getSession())?.data.session
        ?.access_token || "";

    const response = await fetch(this.baseUrl + path, {
      method,
      headers:
        method != "GET"
          ? {
              "Content-Type": "application/json",
              Authorization: token ? `Bearer ${token}` : "",
            }
          : {
              Authorization: token ? `Bearer ${token}` : "",
            },
      body: JSON.stringify(payload),
    });
    const response_data = await response.json();

    if (!response.ok) {
      console.warn(
        `${method} ${path} returned non-OK response:`,
        response_data.detail,
        response,
      );
      throw new Error(`HTTP error ${response.status}! ${response_data.detail}`);
    }
    return response_data;
  }

  async connectWebSocket(): Promise<void> {
    this.wsConnecting ??= new Promise(async (resolve, reject) => {
      try {
        const token =
          (await this.supabaseClient?.auth.getSession())?.data.session
            ?.access_token || "";

        const wsUrlWithToken = `${this.wsUrl}?token=${token}`;
        this.webSocket = new WebSocket(wsUrlWithToken);

        this.webSocket.onopen = () => {
          console.debug("WebSocket connection established");
          resolve();
        };

        this.webSocket.onclose = (event) => {
          console.debug("WebSocket connection closed", event);
          this.webSocket = null;
        };

        this.webSocket.onerror = (error) => {
          console.error("WebSocket error:", error);
          reject(error);
        };

        this.webSocket.onmessage = (event) => {
          const message: WebsocketMessage = JSON.parse(event.data);
          if (message.method == "execution_event") {
            message.data = parseNodeExecutionResultTimestamps(message.data);
          }
          this.wsMessageHandlers[message.method]?.forEach((handler) =>
            handler(message.data),
          );
        };
      } catch (error) {
        console.error("Error connecting to WebSocket:", error);
        reject(error);
      }
    });
    return this.wsConnecting;
  }

  disconnectWebSocket() {
    if (this.webSocket && this.webSocket.readyState === WebSocket.OPEN) {
      this.webSocket.close();
    }
  }

  sendWebSocketMessage<M extends keyof WebsocketMessageTypeMap>(
    method: M,
    data: WebsocketMessageTypeMap[M],
    callCount = 0,
  ) {
    if (this.webSocket && this.webSocket.readyState === WebSocket.OPEN) {
      this.webSocket.send(JSON.stringify({ method, data }));
    } else {
      this.connectWebSocket().then(() => {
        callCount == 0
          ? this.sendWebSocketMessage(method, data, callCount + 1)
          : setTimeout(
              () => {
                this.sendWebSocketMessage(method, data, callCount + 1);
              },
              2 ** (callCount - 1) * 1000,
            );
      });
    }
  }

  onWebSocketMessage<M extends keyof WebsocketMessageTypeMap>(
    method: M,
    handler: (data: WebsocketMessageTypeMap[M]) => void,
  ): () => void {
    this.wsMessageHandlers[method] ??= new Set();
    this.wsMessageHandlers[method].add(handler);

    // Return detacher
    return () => this.wsMessageHandlers[method].delete(handler);
  }

  subscribeToExecution(graphId: string) {
    this.sendWebSocketMessage("subscribe", { graph_id: graphId });
    const supabaseClient = createClient();
    super(baseUrl, wsUrl, supabaseClient);
  }
}

/* *** UTILITY TYPES *** */

type GraphCreateRequestBody =
  | {
      template_id: string;
      template_version: number;
    }
  | {
      graph: GraphCreatable;
    };

type WebsocketMessageTypeMap = {
  subscribe: { graph_id: string };
  execution_event: NodeExecutionResult;
};

type WebsocketMessage = {
  [M in keyof WebsocketMessageTypeMap]: {
    method: M;
    data: WebsocketMessageTypeMap[M];
  };
}[keyof WebsocketMessageTypeMap];

/* *** HELPER FUNCTIONS *** */

function parseNodeExecutionResultTimestamps(result: any): NodeExecutionResult {
  return {
    ...result,
    add_time: new Date(result.add_time),
    queue_time: result.queue_time ? new Date(result.queue_time) : undefined,
    start_time: result.start_time ? new Date(result.start_time) : undefined,
    end_time: result.end_time ? new Date(result.end_time) : undefined,
  };
}
@@ -0,0 +1,14 @@
import { createServerClient } from "../supabase/server";
import BaseAutoGPTServerAPI from "./baseClient";

export default class AutoGPTServerAPIServerSide extends BaseAutoGPTServerAPI {
  constructor(
    baseUrl: string = process.env.NEXT_PUBLIC_AGPT_SERVER_URL ||
      "http://localhost:8006/api",
    wsUrl: string = process.env.NEXT_PUBLIC_AGPT_WS_SERVER_URL ||
      "ws://localhost:8001/ws",
  ) {
    const supabaseClient = createServerClient();
    super(baseUrl, wsUrl, supabaseClient);
  }
}
@@ -5,6 +5,18 @@ export type Category = {
  description: string;
};

export enum BlockCostType {
  RUN = "run",
  BYTE = "byte",
  SECOND = "second",
}

export type BlockCost = {
  cost_amount: number;
  cost_type: BlockCostType;
  cost_filter: { [key: string]: any };
};

export type Block = {
  id: string;
  name: string;
@@ -13,6 +25,8 @@ export type Block = {
  inputSchema: BlockIORootSchema;
  outputSchema: BlockIORootSchema;
  staticOutput: boolean;
  uiType: BlockUIType;
  costs: BlockCost[];
};

export type BlockIORootSchema = {
@@ -182,3 +196,22 @@ export type User = {
  id: string;
  email: string;
};

export enum BlockUIType {
  STANDARD = "Standard",
  INPUT = "Input",
  OUTPUT = "Output",
  NOTE = "Note",
}

export type AnalyticsMetrics = {
  metric_name: string;
  metric_value: number;
  data_string: string;
};

export type AnalyticsDetails = {
  type: string;
  data: { [key: string]: any };
  index: string;
};
@@ -17,7 +17,7 @@ export default class MarketplaceAPI {

  constructor(
    baseUrl: string = process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
      "http://localhost:8001/api/v1/market",
      "http://localhost:8015/api/v1/market",
  ) {
    this.baseUrl = baseUrl;
  }
@@ -7,6 +7,7 @@ export function createClient() {
      process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
    );
  } catch (error) {
    console.error("error creating client", error);
    return null;
  }
}
||||
@@ -24,15 +24,16 @@ export function deepEquals(x: any, y: any): boolean {
  const ok = Object.keys,
    tx = typeof x,
    ty = typeof y;
  return (

  const res =
    x &&
    y &&
    tx === ty &&
    (tx === "object"
      ? ok(x).length === ok(y).length &&
        ok(x).every((key) => deepEquals(x[key], y[key]))
      : x === y)
  );
      : x === y);
  return res;
}

/** Get tailwind text color class from type name */
@@ -184,7 +185,7 @@ export const categoryColorMap: Record<string, string> = {
  SEARCH: "bg-blue-300/[.7]",
  BASIC: "bg-purple-300/[.7]",
  INPUT: "bg-cyan-300/[.7]",
  OUTPUT: "bg-brown-300/[.7]",
  OUTPUT: "bg-red-300/[.7]",
  LOGIC: "bg-teal-300/[.7]",
};

@@ -194,3 +195,10 @@ export function getPrimaryCategoryColor(categories: Category[]): string {
  }
  return categoryColorMap[categories[0].category] || "bg-gray-300/[.7]";
}

export function filterBlocksByType<T>(
  blocks: T[],
  predicate: (block: T) => boolean,
): T[] {
  return blocks.filter(predicate);
}
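A quick sketch of the structural comparison `deepEquals` above performs; note that arrays are compared as objects keyed by index:

console.log(deepEquals({ a: [1, 2] }, { a: [1, 2] })); // true
console.log(deepEquals({ a: [1, 2] }, { a: [1, 3] })); // false
console.log(deepEquals("x", "x"));                     // true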
@@ -1,16 +1,23 @@
import { redirect } from "next/navigation";
import getServerUser from "@/hooks/getServerUser";
import React from "react";
import * as Sentry from "@sentry/nextjs";

export async function withRoleAccess(allowedRoles: string[]) {
  "use server";
  return async function <T extends React.ComponentType<any>>(Component: T) {
    const { user, role, error } = await getServerUser();
  return await Sentry.withServerActionInstrumentation(
    "withRoleAccess",
    {},
    async () => {
      return async function <T extends React.ComponentType<any>>(Component: T) {
        const { user, role, error } = await getServerUser();

        if (error || !user || !role || !allowedRoles.includes(role)) {
          redirect("/unauthorized");
        }
    if (error || !user || !role || !allowedRoles.includes(role)) {
      redirect("/unauthorized");
    }

        return Component;
      };
    return Component;
  };
    },
  );
}
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
import pytest

from .depends import verify_user, requires_admin_user, requires_user
from .depends import requires_admin_user, requires_user, verify_user


def test_verify_user_no_payload():
9
rnd/autogpt_libs/autogpt_libs/logging/__init__.py
Normal file
@@ -0,0 +1,9 @@
from .config import configure_logging
from .filters import BelowLevelFilter
from .formatters import FancyConsoleFormatter

__all__ = [
    "configure_logging",
    "BelowLevelFilter",
    "FancyConsoleFormatter",
]
166
rnd/autogpt_libs/autogpt_libs/logging/config.py
Normal file
@@ -0,0 +1,166 @@
"""Logging module for Auto-GPT."""

import logging
import sys
from pathlib import Path

from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

from .filters import BelowLevelFilter
from .formatters import AGPTFormatter, StructuredLoggingFormatter

LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs"
LOG_FILE = "activity.log"
DEBUG_LOG_FILE = "debug.log"
ERROR_LOG_FILE = "error.log"

SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"

DEBUG_LOG_FORMAT = (
    "%(asctime)s %(levelname)s %(filename)s:%(lineno)d" " %(title)s%(message)s"
)


class LoggingConfig(BaseSettings):

    level: str = Field(
        default="INFO",
        description="Logging level",
        validation_alias="LOG_LEVEL",
    )

    enable_cloud_logging: bool = Field(
        default=False,
        description="Enable logging to Google Cloud Logging",
    )

    enable_file_logging: bool = Field(
        default=False,
        description="Enable logging to file",
    )
    # File output
    log_dir: Path = Field(
        default=LOG_DIR,
        description="Log directory",
    )

    model_config = SettingsConfigDict(
        env_prefix="",
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    @field_validator("level", mode="before")
    @classmethod
    def parse_log_level(cls, v):
        if isinstance(v, str):
            v = v.upper()
            if v not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
                raise ValueError(f"Invalid log level: {v}")
            return v
        return v


def configure_logging(force_cloud_logging: bool = False) -> None:
    """Configure the native logging module based on the LoggingConfig settings.

    This function sets up logging handlers and formatters according to the
    configuration specified in the LoggingConfig object. It supports various
    logging outputs including console, file, cloud, and JSON logging.

    The function uses the LoggingConfig object to determine which logging
    features to enable and how to configure them. This includes setting
    log levels, log formats, and output destinations.

    Aside from the optional ``force_cloud_logging`` flag, no arguments are
    required, as the function creates its own LoggingConfig instance internally.

    Note: This function is typically called at the start of the application
    to set up the logging infrastructure.
    """

    config = LoggingConfig()

    log_handlers: list[logging.Handler] = []

    # Cloud logging setup
    if config.enable_cloud_logging or force_cloud_logging:
        import google.cloud.logging
        from google.cloud.logging.handlers import CloudLoggingHandler
        from google.cloud.logging_v2.handlers.transports.sync import SyncTransport

        client = google.cloud.logging.Client()
        cloud_handler = CloudLoggingHandler(
            client,
            name="autogpt_logs",
            transport=SyncTransport,
        )
        cloud_handler.setLevel(config.level)
        cloud_handler.setFormatter(StructuredLoggingFormatter())
        log_handlers.append(cloud_handler)
        print("Cloud logging enabled")
    else:
        # Console output handlers
        stdout = logging.StreamHandler(stream=sys.stdout)
        stdout.setLevel(config.level)
        stdout.addFilter(BelowLevelFilter(logging.WARNING))
        # config.level is a level *name* (e.g. "DEBUG"), so compare against the string
        if config.level == "DEBUG":
            stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
        else:
            stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

        stderr = logging.StreamHandler()
        stderr.setLevel(logging.WARNING)
        if config.level == "DEBUG":
            stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
        else:
            stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

        log_handlers += [stdout, stderr]
        print("Console logging enabled")

    # File logging setup
    if config.enable_file_logging:
        # create log directory if it doesn't exist
        if not config.log_dir.exists():
            config.log_dir.mkdir(parents=True, exist_ok=True)

        print(f"Log directory: {config.log_dir}")

        # Activity log handler (INFO and above)
        activity_log_handler = logging.FileHandler(
            config.log_dir / LOG_FILE, "a", "utf-8"
        )
        activity_log_handler.setLevel(config.level)
        activity_log_handler.setFormatter(
            AGPTFormatter(SIMPLE_LOG_FORMAT, no_color=True)
        )
        log_handlers.append(activity_log_handler)

        if config.level == "DEBUG":
            # Debug log handler (all levels)
            debug_log_handler = logging.FileHandler(
                config.log_dir / DEBUG_LOG_FILE, "a", "utf-8"
            )
            debug_log_handler.setLevel(logging.DEBUG)
            debug_log_handler.setFormatter(
                AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True)
            )
            log_handlers.append(debug_log_handler)

        # Error log handler (ERROR and above)
        error_log_handler = logging.FileHandler(
            config.log_dir / ERROR_LOG_FILE, "a", "utf-8"
        )
        error_log_handler.setLevel(logging.ERROR)
        error_log_handler.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True))
        log_handlers.append(error_log_handler)
        print("File logging enabled")

    # Configure the root logger
    logging.basicConfig(
        format=DEBUG_LOG_FORMAT if config.level == "DEBUG" else SIMPLE_LOG_FORMAT,
        level=config.level,
        handlers=log_handlers,
    )
12
rnd/autogpt_libs/autogpt_libs/logging/filters.py
Normal file
@@ -0,0 +1,12 @@
import logging


class BelowLevelFilter(logging.Filter):
    """Filter for logging levels below a certain threshold."""

    def __init__(self, below_level: int):
        super().__init__()
        self.below_level = below_level

    def filter(self, record: logging.LogRecord):
        return record.levelno < self.below_level
95
rnd/autogpt_libs/autogpt_libs/logging/formatters.py
Normal file
@@ -0,0 +1,95 @@
import logging

from colorama import Fore, Style
from google.cloud.logging_v2.handlers import CloudLoggingFilter, StructuredLogHandler

from .utils import remove_color_codes


class FancyConsoleFormatter(logging.Formatter):
    """
    A custom logging formatter designed for console output.

    This formatter enhances the standard logging output with color coding. The color
    coding is based on the level of the log message, making it easier to distinguish
    between different types of messages in the console output.

    The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
    """

    # level -> (level & text color, title color)
    LEVEL_COLOR_MAP = {
        logging.DEBUG: Fore.LIGHTBLACK_EX,
        logging.INFO: Fore.BLUE,
        logging.WARNING: Fore.YELLOW,
        logging.ERROR: Fore.RED,
        logging.CRITICAL: Fore.RED + Style.BRIGHT,
    }

    def format(self, record: logging.LogRecord) -> str:
        # Make sure `msg` is a string
        if not hasattr(record, "msg"):
            record.msg = ""
        elif type(record.msg) is not str:
            record.msg = str(record.msg)

        # Determine default color based on error level
        level_color = ""
        if record.levelno in self.LEVEL_COLOR_MAP:
            level_color = self.LEVEL_COLOR_MAP[record.levelno]
            record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"

        # Determine color for message
        color = getattr(record, "color", level_color)
        color_is_specified = hasattr(record, "color")

        # Don't color INFO messages unless the color is explicitly specified.
        if color and (record.levelno != logging.INFO or color_is_specified):
            record.msg = f"{color}{record.msg}{Style.RESET_ALL}"

        return super().format(record)


class AGPTFormatter(FancyConsoleFormatter):
    def __init__(self, *args, no_color: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        self.no_color = no_color

    def format(self, record: logging.LogRecord) -> str:
        # Make sure `msg` is a string
        if not hasattr(record, "msg"):
            record.msg = ""
        elif type(record.msg) is not str:
            record.msg = str(record.msg)

        # Strip color from the message to prevent color spoofing
        if record.msg and not getattr(record, "preserve_color", False):
            record.msg = remove_color_codes(record.msg)

        # Determine color for title
        title = getattr(record, "title", "")
        title_color = getattr(record, "title_color", "") or self.LEVEL_COLOR_MAP.get(
            record.levelno, ""
        )
        if title and title_color:
            title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}"
        # Make sure record.title is set, and padded with a space if not empty
        record.title = f"{title} " if title else ""

        if self.no_color:
            return remove_color_codes(super().format(record))
        else:
            return super().format(record)


class StructuredLoggingFormatter(StructuredLogHandler, logging.Formatter):
    def __init__(self):
        # Set up CloudLoggingFilter to add diagnostic info to the log records
        self.cloud_logging_filter = CloudLoggingFilter()

        # Init StructuredLogHandler
        super().__init__()

    def format(self, record: logging.LogRecord) -> str:
        self.cloud_logging_filter.filter(record)
        return super().format(record)

rnd/autogpt_libs/autogpt_libs/logging/handlers.py (new file)
@@ -0,0 +1,14 @@
from __future__ import annotations

import json
import logging


class JsonFileHandler(logging.FileHandler):
    def format(self, record: logging.LogRecord) -> str:
        record.json_data = json.loads(record.getMessage())
        return json.dumps(getattr(record, "json_data"), ensure_ascii=False, indent=4)

    def emit(self, record: logging.LogRecord) -> None:
        with open(self.baseFilename, "w", encoding="utf-8") as f:
            f.write(self.format(record))

rnd/autogpt_libs/autogpt_libs/logging/test_utils.py (new file)
@@ -0,0 +1,36 @@
import pytest

from .utils import remove_color_codes


@pytest.mark.parametrize(
    "raw_text, clean_text",
    [
        (
            "COMMAND = \x1b[36mbrowse_website\x1b[0m "
            "ARGUMENTS = \x1b[36m{'url': 'https://www.google.com',"
            " 'question': 'What is the capital of France?'}\x1b[0m",
            "COMMAND = browse_website "
            "ARGUMENTS = {'url': 'https://www.google.com',"
            " 'question': 'What is the capital of France?'}",
        ),
        (
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
            "'https://github.com/Significant-Gravitas/AutoGPT,"
            " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
            "'https://github.com/Significant-Gravitas/AutoGPT,"
            " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
        ),
        ("", ""),
        ("hello", "hello"),
        ("hello\x1B[31m world", "hello world"),
        ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"),
        (
            "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found",
            "Error: file not found",
        ),
    ],
)
def test_remove_color_codes(raw_text, clean_text):
    assert remove_color_codes(raw_text) == clean_text

rnd/autogpt_libs/autogpt_libs/logging/utils.py (new file)
@@ -0,0 +1,27 @@
import logging
import re
from typing import Any

from colorama import Fore


def remove_color_codes(s: str) -> str:
    return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)


def fmt_kwargs(kwargs: dict) -> str:
    return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())


def print_attribute(
    title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
    logger = logging.getLogger()
    logger.info(
        str(value),
        extra={
            "title": f"{title.rstrip(':')}:",
            "title_color": title_color,
            "color": value_color,
        },
    )
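For illustration, a hedged sketch of how `print_attribute` feeds the `title` and `title_color` extras consumed by `AGPTFormatter` above. The handler wiring and the import path (inferred from the file layout in this diff) are assumptions:

```python
import logging
import sys

# Import path assumed from the rnd/autogpt_libs file layout shown above.
from autogpt_libs.logging.formatters import AGPTFormatter
from autogpt_libs.logging.utils import print_attribute

handler = logging.StreamHandler(stream=sys.stdout)
# "%(title)s" is populated from the `extra` dict that print_attribute passes in.
handler.setFormatter(AGPTFormatter("%(title)s%(message)s"))
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)

# Renders roughly as "Model: gpt-4o", with the title in green.
print_attribute("Model", "gpt-4o")
```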
@@ -1,13 +1,21 @@
import secrets
from datetime import datetime, timedelta, timezone
from typing import cast

from supabase import Client, create_client
from supabase import Client

from .types import Credentials, OAuth2Credentials, UserMetadata, UserMetadataRaw
from .types import (
    Credentials,
    OAuth2Credentials,
    OAuthState,
    UserMetadata,
    UserMetadataRaw,
)


class SupabaseIntegrationCredentialsStore:
    def __init__(self, url: str, key: str):
        self.supabase: Client = create_client(url, key)
    def __init__(self, supabase: Client):
        self.supabase = supabase

    def add_creds(self, user_id: str, credentials: Credentials) -> None:
        if self.get_creds_by_id(user_id, credentials.id):
@@ -73,6 +81,52 @@ class SupabaseIntegrationCredentialsStore:
        ]
        self._set_user_integration_creds(user_id, filtered_credentials)

    async def store_state_token(self, user_id: str, provider: str) -> str:
        token = secrets.token_urlsafe(32)
        expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)

        state = OAuthState(
            token=token, provider=provider, expires_at=int(expires_at.timestamp())
        )

        user_metadata = self._get_user_metadata(user_id)
        oauth_states = user_metadata.get("integration_oauth_states", [])
        oauth_states.append(state.model_dump())
        user_metadata["integration_oauth_states"] = oauth_states

        self.supabase.auth.admin.update_user_by_id(
            user_id, {"user_metadata": user_metadata}
        )

        return token

    async def verify_state_token(self, user_id: str, token: str, provider: str) -> bool:
        user_metadata = self._get_user_metadata(user_id)
        oauth_states = user_metadata.get("integration_oauth_states", [])

        now = datetime.now(timezone.utc)
        valid_state = next(
            (
                state
                for state in oauth_states
                if state["token"] == token
                and state["provider"] == provider
                and state["expires_at"] > now.timestamp()
            ),
            None,
        )

        if valid_state:
            # Remove the used state
            oauth_states.remove(valid_state)
            user_metadata["integration_oauth_states"] = oauth_states
            self.supabase.auth.admin.update_user_by_id(
                user_id, {"user_metadata": user_metadata}
            )
            return True

        return False

    def _set_user_integration_creds(
        self, user_id: str, credentials: list[Credentials]
    ) -> None:
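A sketch of how the two new methods fit an OAuth round-trip; the Supabase URL, key, module path, and provider name are placeholders, not part of this diff:

```python
from supabase import create_client

# Module path assumed; the diff does not show this file's location.
from autogpt_libs.supabase_integration_credentials_store.store import (
    SupabaseIntegrationCredentialsStore,
)

supabase = create_client("https://<project>.supabase.co", "<service-role-key>")
store = SupabaseIntegrationCredentialsStore(supabase)


async def begin_oauth(user_id: str) -> str:
    # Embed the returned token in the OAuth `state` query parameter.
    return await store.store_state_token(user_id, provider="github")


async def finish_oauth(user_id: str, state: str) -> bool:
    # Single-use check: a valid token is removed so it cannot be replayed.
    return await store.verify_state_token(user_id, state, provider="github")
```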
@@ -7,7 +7,7 @@ from pydantic import BaseModel, Field, SecretStr, field_serializer
class _BaseCredentials(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid4()))
    provider: str
    title: str
    title: Optional[str]

    @field_serializer("*")
    def dump_secret_strings(value: Any, _info):
@@ -18,10 +18,14 @@ class _BaseCredentials(BaseModel):

class OAuth2Credentials(_BaseCredentials):
    type: Literal["oauth2"] = "oauth2"
    username: Optional[str]
    """Username of the third-party service user that these credentials belong to"""
    access_token: SecretStr
    access_token_expires_at: Optional[int]  # seconds
    access_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the access token expires (if at all)"""
    refresh_token: Optional[SecretStr]
    refresh_token_expires_at: Optional[int]  # seconds
    refresh_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the refresh token expires (if at all)"""
    scopes: list[str]
    metadata: dict[str, Any] = Field(default_factory=dict)

@@ -29,7 +33,8 @@ class OAuth2Credentials(_BaseCredentials):
class APIKeyCredentials(_BaseCredentials):
    type: Literal["api_key"] = "api_key"
    api_key: SecretStr
    expires_at: Optional[int]  # seconds
    expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the API key expires (if at all)"""


Credentials = Annotated[
@@ -38,9 +43,18 @@ Credentials = Annotated[
]


class OAuthState(BaseModel):
    token: str
    provider: str
    expires_at: int
    """Unix timestamp (seconds) indicating when this OAuth state expires"""


class UserMetadata(BaseModel):
    integration_credentials: list[Credentials] = Field(default_factory=list)
    integration_oauth_states: list[OAuthState] = Field(default_factory=list)


class UserMetadataRaw(TypedDict, total=False):
    integration_credentials: list[dict]
    integration_oauth_states: list[dict]

rnd/autogpt_libs/poetry.lock (generated, 1439 lines changed; diff suppressed because it is too large)
@@ -1,19 +1,21 @@
[tool.poetry]
name = "autogpt-libs"
version = "0.1.0"
version = "0.2.0"
description = "Shared libraries across NextGen AutoGPT"
authors = ["Aarushi <aarushik93@gmail.com>"]
readme = "README.md"
packages = [{ include = "autogpt_libs" }]

[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
google-cloud-logging = "^3.8.0"
pydantic = "^2.8.2"
pydantic-settings = "^2.5.2"
pyjwt = "^2.8.0"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.7.2"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
@@ -1,7 +1,7 @@
DB_USER=agpt_user
DB_PASS=pass123
DB_NAME=agpt_local
DB_PORT=5432
DB_PORT=5433
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}"
PRISMA_SCHEMA="postgres/schema.prisma"

@@ -9,9 +9,13 @@ REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password

AUTH_ENABLED=false
ENABLE_AUTH=false
ENABLE_CREDIT=false
APP_ENV="local"
PYRO_HOST=localhost
SENTRY_DSN=
# This is needed when ENABLE_AUTH is true
SUPABASE_JWT_SECRET=

## ===== OPTIONAL API KEYS ===== ##

@@ -50,3 +54,11 @@ SMTP_PASSWORD=
# Medium
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=


# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
ENABLE_FILE_LOGGING=false
# Use to manually set the log directory
# LOG_DIR=./logs

rnd/autogpt_server/.vscode/settings.json (vendored)
@@ -1,3 +1,6 @@
{
    "python.analysis.typeCheckingMode": "basic",
    "python.testing.pytestArgs": ["test"],
    "python.testing.unittestEnabled": false,
    "python.testing.pytestEnabled": true
}
@@ -1,46 +1,79 @@
FROM python:3.11-slim-buster as server_base
FROM python:3.11-slim-buster AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1

WORKDIR /app

# Install build dependencies
RUN apt-get update \
    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev \
    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev postgresql-client git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && wget https://github.com/git/git/archive/v2.28.0.tar.gz -O git.tar.gz \
    && tar -zxf git.tar.gz \
    && cd git-* \
    && make prefix=/usr all \
    && make prefix=/usr install
    && rm -rf /var/lib/apt/lists/*

ENV POETRY_VERSION=1.8.3 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"

# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools

RUN pip3 install poetry

COPY autogpt /app/autogpt
COPY forge /app/forge
# Copy and install dependencies
COPY rnd/autogpt_libs /app/rnd/autogpt_libs
COPY rnd/autogpt_server/poetry.lock rnd/autogpt_server/pyproject.toml /app/rnd/autogpt_server/
WORKDIR /app/rnd/autogpt_server
RUN poetry config virtualenvs.create false \
    && poetry install --no-interaction --no-ansi

# Generate Prisma client
COPY rnd/autogpt_server/schema.prisma ./
RUN poetry config virtualenvs.create false \
    && poetry run prisma generate

FROM python:3.11-slim-buster AS server_dependencies

WORKDIR /app

ENV POETRY_VERSION=1.8.3 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"

# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools

# Copy only necessary files from builder
COPY --from=builder /app /app
COPY --from=builder /usr/local/lib/python3.11 /usr/local/lib/python3.11
COPY --from=builder /usr/local/bin /usr/local/bin
# Copy Prisma binaries
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

ENV PATH="/app/.venv/bin:$PATH"

RUN mkdir -p /app/rnd/autogpt_libs
RUN mkdir -p /app/rnd/autogpt_server

COPY rnd/autogpt_libs /app/rnd/autogpt_libs

COPY rnd/autogpt_server/poetry.lock rnd/autogpt_server/pyproject.toml /app/rnd/autogpt_server/

WORKDIR /app/rnd/autogpt_server

COPY rnd/autogpt_server/pyproject.toml rnd/autogpt_server/poetry.lock ./
RUN poetry install --no-interaction --no-ansi

COPY rnd/autogpt_server/schema.prisma ./
RUN poetry run prisma generate
FROM server_dependencies AS server

COPY rnd/autogpt_server /app/rnd/autogpt_server
FROM server_base as server

ENV PORT=8000
ENV DATABASE_URL=""
ENV PORT=8000

CMD ["poetry", "run", "rest"]
@@ -1,48 +0,0 @@
FROM python:3.11-slim-buster as server_base

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1

WORKDIR /app

RUN apt-get update \
    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && wget https://github.com/git/git/archive/v2.28.0.tar.gz -O git.tar.gz \
    && tar -zxf git.tar.gz \
    && cd git-* \
    && make prefix=/usr all \
    && make prefix=/usr install

ENV POETRY_VERSION=1.8.3 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"
RUN pip3 install poetry

COPY autogpt /app/autogpt
COPY forge /app/forge
COPY rnd/autogpt_libs /app/rnd/autogpt_libs

WORKDIR /app/rnd/autogpt_server

COPY rnd/autogpt_server/pyproject.toml rnd/autogpt_server/poetry.lock ./
RUN poetry install --no-interaction --no-ansi

COPY rnd/autogpt_server/schema.prisma ./
RUN poetry run prisma generate

COPY rnd/autogpt_server /app/rnd/autogpt_server
FROM server_base as server

FROM server_base as server

ENV PORT=8001
ENV DATABASE_URL=""

CMD ["poetry", "run", "ws"]
@@ -101,7 +101,7 @@ docker compose down

If you run into issues with dangling orphans, try:

```sh
docker-compose down --volumes --remove-orphans && docker-compose up --force-recreate --renew-anon-volumes --remove-orphans
docker compose down --volumes --remove-orphans && docker-compose up --force-recreate --renew-anon-volumes --remove-orphans
```

## Testing

@@ -183,6 +183,13 @@ A communication layer (`service.py`) is created to decouple the communication li

Currently, the IPC is done using Pyro5 and abstracted in a way that allows a function decorated with `@expose` to be called from a different process. A sketch of this pattern follows the port list below.

By default the daemons run on the following ports:

- Execution Manager Daemon: 8002
- Execution Scheduler Daemon: 8003
- Rest Server Daemon: 8004
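A minimal sketch of the `@expose` pattern described above, using plain Pyro5 rather than the project's actual service classes (the class, object ID, and port here are illustrative):

```python
import Pyro5.api


@Pyro5.api.expose
class Calculator:
    def add(self, a: int, b: int) -> int:
        return a + b


if __name__ == "__main__":
    # Server process: register the exposed class and block serving requests.
    daemon = Pyro5.api.Daemon(port=9090)
    uri = daemon.register(Calculator, objectId="calculator")
    print(f"Serving at {uri}")
    daemon.requestLoop()

# In another process, the same method is callable through a proxy:
#   calc = Pyro5.api.Proxy("PYRO:calculator@localhost:9090")
#   calc.add(2, 3)  # -> 5
```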
## Adding a New Agent Block

To add a new agent block, you need to create a new class that inherits from `Block` and provides the following information:
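As a sketch (the enumerated requirements are cut off at this hunk boundary), a minimal hypothetical block following the conventions visible elsewhere in this diff: a unique id, a description, input/output schemas, test data, and a `run` generator:

```python
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import SchemaField


class EchoBlock(Block):
    """Illustrative only: a block that passes its input straight through."""

    class Input(BlockSchema):
        text: str = SchemaField(description="Text to echo back.")

    class Output(BlockSchema):
        text: str = SchemaField(description="The same text, unchanged.")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            description="Echoes its input, as a minimal example.",
            categories={BlockCategory.BASIC},
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
            test_input={"text": "hello"},
            test_output=[("text", "hello")],
        )

    def run(self, input_data: Input) -> BlockOutput:
        yield "text", input_data.text
```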
@@ -26,10 +26,8 @@ def main(**kwargs):

from autogpt_server.executor import ExecutionManager, ExecutionScheduler
from autogpt_server.server import AgentServer, WebsocketServer
from autogpt_server.util.service import PyroNameServer

run_processes(
    PyroNameServer(),
    ExecutionManager(),
    ExecutionScheduler(),
    WebsocketServer(),
@@ -55,15 +55,15 @@ for cls in all_subclasses(Block):
        raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")

    # Prevent duplicate field name in input_schema and output_schema
    duplicate_field_names = set(block.input_schema.__fields__.keys()) & set(
        block.output_schema.__fields__.keys()
    duplicate_field_names = set(block.input_schema.model_fields.keys()) & set(
        block.output_schema.model_fields.keys()
    )
    if duplicate_field_names:
        raise ValueError(
            f"{block.name} has duplicate field names in input_schema and output_schema: {duplicate_field_names}"
        )

    for field in block.input_schema.__fields__.values():
    for field in block.input_schema.model_fields.values():
        if field.annotation is bool and field.default not in (True, False):
            raise ValueError(f"{block.name} has a boolean field with no default value")
@@ -1,190 +0,0 @@
from __future__ import annotations

import asyncio
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Iterator

from autogpt.agents.agent import Agent, AgentSettings
from autogpt.app.config import ConfigBuilder
from forge.agent.components import AgentComponent
from forge.agent.protocols import CommandProvider
from forge.command import command
from forge.command.command import Command
from forge.file_storage import FileStorageBackendName, get_storage
from forge.file_storage.base import FileStorage
from forge.llm.providers import MultiProvider
from forge.llm.providers.openai import OpenAICredentials, OpenAIProvider
from forge.llm.providers.schema import ModelProviderName
from forge.models.json_schema import JSONSchema
from pydantic import Field, SecretStr

from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SchemaField, SecretField

if TYPE_CHECKING:
    from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class BlockAgentSettings(AgentSettings):
    enabled_components: list[str] = Field(default_factory=list)


class OutputComponent(CommandProvider):
    def get_commands(self) -> Iterator[Command]:
        yield self.output

    @command(
        parameters={
            "output": JSONSchema(
                type=JSONSchema.Type.STRING,
                description="Output data to be returned.",
                required=True,
            ),
        },
    )
    def output(self, output: str) -> str:
        """Use this to output the result."""
        return output


class BlockAgent(Agent):
    def __init__(
        self,
        settings: BlockAgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: AppConfig,
    ):
        super().__init__(settings, llm_provider, file_storage, app_config)

        self.output = OutputComponent()

        # Disable components
        for attr_name in list(self.__dict__.keys()):
            attr_value = getattr(self, attr_name)
            if not isinstance(attr_value, AgentComponent):
                continue
            component_name = type(attr_value).__name__
            if (
                component_name != "SystemComponent"
                and component_name not in settings.enabled_components
            ):
                delattr(self, attr_name)


class AutoGPTAgentBlock(Block):
    class Input(BlockSchema):
        task: str = SchemaField(
            description="Task description for the agent.",
            placeholder="Calculate and use Output command",
        )
        input: str = SchemaField(
            description="Input data for the task",
            placeholder="8 + 5",
        )
        openai_api_key: BlockSecret = SecretField(
            key="openai_api_key", description="OpenAI API key"
        )
        enabled_components: list[str] = Field(
            default_factory=lambda: [OutputComponent.__name__],
            description="List of [AgentComponents](https://docs.agpt.co/forge/components/built-in-components/) enabled for the agent.",
        )
        disabled_commands: list[str] = Field(
            default_factory=list,
            description="List of commands from enabled components to disable.",
        )
        fast_mode: bool = Field(
            False,
            description="If true uses fast llm, otherwise uses smart and slow llm.",
        )

    class Output(BlockSchema):
        result: str

    def __init__(self):
        super().__init__(
            id="d2e2ecd2-9ae6-422d-8dfe-ceca500ce6a6",
            description="AutoGPT agent, it utilizes a Large Language Model and enabled components/tools to perform a task.",
            categories={BlockCategory.AI},
            input_schema=AutoGPTAgentBlock.Input,
            output_schema=AutoGPTAgentBlock.Output,
            test_input={
                "task": "Make calculations and use output command to output the result",
                "input": "5 + 3",
                "openai_api_key": "openai_api_key",
                "enabled_components": [OutputComponent.__name__],
                "disabled_commands": ["finish"],
                "fast_mode": True,
            },
            test_output=[
                ("result", "8"),
            ],
            test_mock={
                "get_provider": lambda _: MultiProvider(),
                "get_result": lambda _: "8",
            },
        )

    @staticmethod
    def get_provider(openai_api_key: str) -> MultiProvider:
        # LLM provider
        settings = OpenAIProvider.default_settings.model_copy()
        settings.credentials = OpenAICredentials(api_key=SecretStr(openai_api_key))
        openai_provider = OpenAIProvider(settings=settings)

        multi_provider = MultiProvider()
        # HACK: Add OpenAI provider to the multi provider with api key
        multi_provider._provider_instances[ModelProviderName.OPENAI] = openai_provider

        return multi_provider

    @staticmethod
    def get_result(agent: BlockAgent) -> str:
        error: Exception | None = None

        for tries in range(3):
            try:
                proposal = asyncio.run(agent.propose_action())
                result = asyncio.run(agent.execute(proposal))
                return str(result)
            except Exception as e:
                error = e

        raise error or Exception("Failed to get result")

    def run(self, input_data: Input) -> BlockOutput:
        # Set up configuration
        config = ConfigBuilder.build_config_from_env()
        # Disable commands
        config.disabled_commands.extend(input_data.disabled_commands)

        # Storage
        local = config.file_storage_backend == FileStorageBackendName.LOCAL
        restrict_to_root = not local or config.restrict_to_workspace
        file_storage = get_storage(
            config.file_storage_backend,
            root_path=Path("data"),
            restrict_to_root=restrict_to_root,
        )
        file_storage.initialize()

        # State
        state = BlockAgentSettings(
            agent_id="TemporaryAgentID",
            name="WrappedAgent",
            description="Wrapped agent for the Agent Server.",
            task=f"Your task: {input_data.task}\n" f"Input data: {input_data.input}",
            enabled_components=input_data.enabled_components,
        )
        # Switch big brain mode
        state.config.big_brain = not input_data.fast_mode
        provider = self.get_provider(input_data.openai_api_key.get_secret_value())

        agent = BlockAgent(state, provider, file_storage, config)

        result = self.get_result(agent)

        yield "result", result
@@ -1,5 +1,7 @@
import re
from typing import Any, List

from jinja2 import BaseLoader, Environment
from pydantic import Field

from autogpt_server.data.block import (
@@ -12,6 +14,8 @@ from autogpt_server.data.block import (
from autogpt_server.data.model import SchemaField
from autogpt_server.util.mock import MockObject

jinja = Environment(loader=BaseLoader())


class StoreValueBlock(Block):
    """
@@ -136,7 +140,7 @@ class FindInDictionaryBlock(Block):
        yield "missing", input_data.input


class InputBlock(Block):
class AgentInputBlock(Block):
    """
    This block is used to provide input to the graph.

@@ -148,13 +152,20 @@ class InputBlock(Block):
    class Input(BlockSchema):
        value: Any = SchemaField(description="The value to be passed as input.")
        name: str = SchemaField(description="The name of the input.")
        description: str = SchemaField(description="The description of the input.")
        description: str = SchemaField(
            description="The description of the input.",
            default="",
            advanced=True,
        )
        placeholder_values: List[Any] = SchemaField(
            description="The placeholder values to be passed as input."
            description="The placeholder values to be passed as input.",
            default=[],
            advanced=True,
        )
        limit_to_placeholder_values: bool = SchemaField(
            description="Whether to limit the selection to placeholder values.",
            default=False,
            advanced=True,
        )

    class Output(BlockSchema):
@@ -164,8 +175,8 @@ class InputBlock(Block):
        super().__init__(
            id="c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
            description="This block is used to provide input to the graph.",
            input_schema=InputBlock.Input,
            output_schema=InputBlock.Output,
            input_schema=AgentInputBlock.Input,
            output_schema=AgentInputBlock.Output,
            test_input=[
                {
                    "value": "Hello, World!",
@@ -194,7 +205,7 @@ class InputBlock(Block):
        yield "result", input_data.value


class OutputBlock(Block):
class AgentOutputBlock(Block):
    """
    Records the output of the graph for users to see.

@@ -215,13 +226,17 @@ class OutputBlock(Block):
    """

    class Input(BlockSchema):
        recorded_value: Any = SchemaField(
            description="The value to be recorded as output."
        )
        value: Any = SchemaField(description="The value to be recorded as output.")
        name: str = SchemaField(description="The name of the output.")
        description: str = SchemaField(description="The description of the output.")
        fmt_string: str = SchemaField(
            description="The format string to be used to format the recorded_value."
        description: str = SchemaField(
            description="The description of the output.",
            default="",
            advanced=True,
        )
        format: str = SchemaField(
            description="The format string to be used to format the recorded_value.",
            default="",
            advanced=True,
        )

    class Output(BlockSchema):
@@ -238,31 +253,31 @@ class OutputBlock(Block):
                "This block is key for capturing and presenting final results or "
                "important intermediate outputs of the graph execution."
            ),
            input_schema=OutputBlock.Input,
            output_schema=OutputBlock.Output,
            input_schema=AgentOutputBlock.Input,
            output_schema=AgentOutputBlock.Output,
            test_input=[
                {
                    "recorded_value": "Hello, World!",
                    "value": "Hello, World!",
                    "name": "output_1",
                    "description": "This is a test output.",
                    "fmt_string": "{value}",
                    "format": "{{ output_1 }}!!",
                },
                {
                    "recorded_value": 42,
                    "value": "42",
                    "name": "output_2",
                    "description": "This is another test output.",
                    "fmt_string": "{value}",
                    "format": "{{ output_2 }}",
                },
                {
                    "recorded_value": MockObject(value="!!", key="key"),
                    "value": MockObject(value="!!", key="key"),
                    "name": "output_3",
                    "description": "This is a test output with a mock object.",
                    "fmt_string": "{value}",
                    "format": "{{ output_3 }}",
                },
            ],
            test_output=[
                ("output", "Hello, World!"),
                ("output", 42),
                ("output", "Hello, World!!!"),
                ("output", "42"),
                ("output", MockObject(value="!!", key="key")),
            ],
            categories={BlockCategory.OUTPUT, BlockCategory.BASIC},
@@ -274,13 +289,15 @@ class OutputBlock(Block):
        Attempts to format the recorded_value using the fmt_string if provided.
        If formatting fails or no fmt_string is given, returns the original recorded_value.
        """
        if input_data.fmt_string:
        if input_data.format:
            try:
                yield "output", input_data.fmt_string.format(input_data.recorded_value)
            except Exception:
                yield "output", input_data.recorded_value
                fmt = re.sub(r"(?<!{){[ a-zA-Z0-9_]+}", r"{\g<0>}", input_data.format)
                template = jinja.from_string(fmt)
                yield "output", template.render({input_data.name: input_data.value})
            except Exception as e:
                yield "output", f"Error: {e}, {input_data.value}"
        else:
            yield "output", input_data.recorded_value
            yield "output", input_data.value
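The regex in the new `run` method above promotes bare `{...}` placeholders to Jinja's `{{ ... }}` syntax while leaving existing double braces alone, so legacy `str.format`-style templates keep working. A small standalone check of that behaviour:

```python
import re

from jinja2 import BaseLoader, Environment

jinja = Environment(loader=BaseLoader())

# "(?<!{)" skips braces that are already doubled, so "{{ x }}" stays untouched.
fmt = re.sub(r"(?<!{){[ a-zA-Z0-9_]+}", r"{\g<0>}", "Hello {name}!")
assert fmt == "Hello {{name}}!"

template = jinja.from_string(fmt)
assert template.render({"name": "World"}) == "Hello World!"
```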

class AddToDictionaryBlock(Block):
@@ -422,7 +439,8 @@ class NoteBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="The text to display in the sticky note.")

    class Output(BlockSchema): ...
    class Output(BlockSchema):
        output: str = SchemaField(description="The text to display in the sticky note.")

    def __init__(self):
        super().__init__(
@@ -432,8 +450,11 @@ class NoteBlock(Block):
            input_schema=NoteBlock.Input,
            output_schema=NoteBlock.Output,
            test_input={"text": "Hello, World!"},
            test_output=None,
            test_output=[
                ("output", "Hello, World!"),
            ],
            ui_type=BlockUIType.NOTE,
        )

    def run(self, input_data: Input) -> BlockOutput: ...
    def run(self, input_data: Input) -> BlockOutput:
        yield "output", input_data.text
@@ -14,7 +14,8 @@ class ReadCsvBlock(Block):
        skip_columns: list[str] = []

    class Output(BlockSchema):
        data: dict[str, str]
        row: dict[str, str]
        all_data: list[dict[str, str]]

    def __init__(self):
        super().__init__(
@@ -27,8 +28,15 @@ class ReadCsvBlock(Block):
                "contents": "a, b, c\n1,2,3\n4,5,6",
            },
            test_output=[
                ("data", {"a": "1", "b": "2", "c": "3"}),
                ("data", {"a": "4", "b": "5", "c": "6"}),
                ("row", {"a": "1", "b": "2", "c": "3"}),
                ("row", {"a": "4", "b": "5", "c": "6"}),
                (
                    "all_data",
                    [
                        {"a": "1", "b": "2", "c": "3"},
                        {"a": "4", "b": "5", "c": "6"},
                    ],
                ),
            ],
        )

@@ -53,8 +61,7 @@ class ReadCsvBlock(Block):
        for _ in range(input_data.skip_rows):
            next(reader)

        # join the data with the header
        for row in reader:
        def process_row(row):
            data = {}
            for i, value in enumerate(row):
                if i not in input_data.skip_columns:
@@ -62,4 +69,12 @@ class ReadCsvBlock(Block):
                    data[header[i]] = value.strip() if input_data.strip else value
                else:
                    data[str(i)] = value.strip() if input_data.strip else value
            yield "data", data
            return data

        all_data = []
        for row in reader:
            processed_row = process_row(row)
            all_data.append(processed_row)
            yield "row", processed_row

        yield "all_data", all_data
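With this change the block emits one `row` output per CSV line plus a final `all_data` aggregate, so downstream blocks can consume either stream. A hedged sketch of what iterating the generator yields; the exact `Input` construction and field defaults are assumptions:

```python
block = ReadCsvBlock()
outputs = list(block.run(ReadCsvBlock.Input(contents="a, b, c\n1,2,3\n4,5,6")))

# Expected order (per the test_output above): per-row outputs first, then the aggregate:
#   ("row", {"a": "1", "b": "2", "c": "3"})
#   ("row", {"a": "4", "b": "5", "c": "6"})
#   ("all_data", [{"a": "1", ...}, {"a": "4", ...}])
```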
@@ -1,6 +1,7 @@
import logging
from enum import Enum
from typing import List, NamedTuple
from json import JSONDecodeError
from typing import Any, List, NamedTuple

import anthropic
import ollama
@@ -24,6 +25,7 @@ LlmApiKeys = {
class ModelMetadata(NamedTuple):
    provider: str
    context_window: int
    cost_factor: int


class LlmModel(str, Enum):
@@ -55,26 +57,29 @@ class LlmModel(str, Enum):


MODEL_METADATA = {
    LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000),
    LlmModel.GPT4O: ModelMetadata("openai", 128000),
    LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000),
    LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385),
    LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000),
    LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000),
    LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192),
    LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192),
    LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768),
    LlmModel.GEMMA_7B: ModelMetadata("groq", 8192),
    LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192),
    LlmModel.LLAMA3_1_405B: ModelMetadata(
        "groq", 8192
    ),  # Limited to 16k during preview
    LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072),
    LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072),
    LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192),
    LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192),
    LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=10),
    LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=12),
    LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=11),
    LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=8),
    LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=14),
    LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=13),
    LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=6),
    LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=9),
    LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=7),
    LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=6),
    LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=7),
    LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=10),
    # Limited to 16k during preview
    LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=15),
    LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=13),
    LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=7),
    LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=11),
}

for model in LlmModel:
    if model not in MODEL_METADATA:
        raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")


class AIStructuredResponseGeneratorBlock(Block):
    class Input(BlockSchema):
@@ -89,7 +94,7 @@ class AIStructuredResponseGeneratorBlock(Block):
        )

    class Output(BlockSchema):
        response: dict[str, str]
        response: dict[str, Any]
        error: str

    def __init__(self):
@@ -135,16 +140,33 @@ class AIStructuredResponseGeneratorBlock(Block):
            )
            return response.choices[0].message.content or ""
        elif provider == "anthropic":
            sysprompt = "".join([p["content"] for p in prompt if p["role"] == "system"])
            usrprompt = [p for p in prompt if p["role"] == "user"]
            system_messages = [p["content"] for p in prompt if p["role"] == "system"]
            sysprompt = " ".join(system_messages)

            messages = []
            last_role = None
            for p in prompt:
                if p["role"] in ["user", "assistant"]:
                    if p["role"] != last_role:
                        messages.append({"role": p["role"], "content": p["content"]})
                        last_role = p["role"]
                    else:
                        # If the role is the same as the last one, combine the content
                        messages[-1]["content"] += "\n" + p["content"]

            client = anthropic.Anthropic(api_key=api_key)
            response = client.messages.create(
                model=model.value,
                max_tokens=4096,
                system=sysprompt,
                messages=usrprompt,  # type: ignore
            )
            return response.content[0].text if response.content else ""
            try:
                response = client.messages.create(
                    model=model.value,
                    max_tokens=4096,
                    system=sysprompt,
                    messages=messages,
                )
                return response.content[0].text if response.content else ""
            except anthropic.APIError as e:
                error_message = f"Anthropic API error: {str(e)}"
                logger.error(error_message)
                raise ValueError(error_message)
        elif provider == "groq":
            client = Groq(api_key=api_key)
            response_format = {"type": "json_object"} if json_format else None
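The new message-merging loop above collapses consecutive same-role messages into one, presumably because the Anthropic Messages API expects user/assistant roles to alternate. The same logic in isolation:

```python
prompt = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "First part."},
    {"role": "user", "content": "Second part."},
    {"role": "assistant", "content": "Understood."},
]

messages = []
last_role = None
for p in prompt:
    if p["role"] in ["user", "assistant"]:
        if p["role"] != last_role:
            messages.append({"role": p["role"], "content": p["content"]})
            last_role = p["role"]
        else:
            # Consecutive same-role messages get combined into one.
            messages[-1]["content"] += "\n" + p["content"]

assert messages == [
    {"role": "user", "content": "First part.\nSecond part."},
    {"role": "assistant", "content": "Understood."},
]
```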
@@ -195,14 +217,16 @@

        prompt.append({"role": "user", "content": input_data.prompt})

        def parse_response(resp: str) -> tuple[dict[str, str], str | None]:
        def parse_response(resp: str) -> tuple[dict[str, Any], str | None]:
            try:
                parsed = json.loads(resp)
                if not isinstance(parsed, dict):
                    return {}, f"Expected a dictionary, but got {type(parsed)}"
                miss_keys = set(input_data.expected_format.keys()) - set(parsed.keys())
                if miss_keys:
                    return parsed, f"Missing keys: {miss_keys}"
                return parsed, None
            except Exception as e:
            except JSONDecodeError as e:
                return {}, f"JSON decode error: {e}"

        logger.info(f"LLM request: {prompt}")
@@ -226,7 +250,16 @@
        if input_data.expected_format:
            parsed_dict, parsed_error = parse_response(response_text)
            if not parsed_error:
                yield "response", {k: str(v) for k, v in parsed_dict.items()}
                yield "response", {
                    k: (
                        json.loads(v)
                        if isinstance(v, str)
                        and v.startswith("[")
                        and v.endswith("]")
                        else (", ".join(v) if isinstance(v, list) else v)
                    )
                    for k, v in parsed_dict.items()
                }
                return
        else:
            yield "response", {"response": response_text}
@@ -301,7 +334,7 @@ class AITextGeneratorBlock(Block):
            yield "error", str(e)


class TextSummarizerBlock(Block):
class AITextSummarizerBlock(Block):
    class Input(BlockSchema):
        text: str
        model: LlmModel = LlmModel.GPT4_TURBO
@@ -319,8 +352,8 @@ class TextSummarizerBlock(Block):
            id="c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8",
            description="Utilize a Large Language Model (LLM) to summarize a long text.",
            categories={BlockCategory.AI, BlockCategory.TEXT},
            input_schema=TextSummarizerBlock.Input,
            output_schema=TextSummarizerBlock.Output,
            input_schema=AITextSummarizerBlock.Input,
            output_schema=AITextSummarizerBlock.Output,
            test_input={"text": "Lorem ipsum..." * 100},
            test_output=("summary", "Final summary of a long text"),
            test_mock={
@@ -412,7 +445,7 @@ class TextSummarizerBlock(Block):
        else:
            # If combined summaries are still too long, recursively summarize
            return self._run(
                TextSummarizerBlock.Input(
                AITextSummarizerBlock.Input(
                    text=combined_text,
                    api_key=input_data.api_key,
                    model=input_data.model,
@@ -438,7 +471,7 @@ class Message(BlockSchema):
class AIConversationBlock(Block):
    class Input(BlockSchema):
        messages: List[Message] = SchemaField(
            description="List of messages in the conversation.", min_items=1
            description="List of messages in the conversation.", min_length=1
        )
        model: LlmModel = SchemaField(
            default=LlmModel.GPT4_TURBO,

rnd/autogpt_server/autogpt_server/blocks/sampling.py (new file)
@@ -0,0 +1,264 @@
import random
from collections import defaultdict
from enum import Enum
from typing import Any, Dict, List, Optional, Union

from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import SchemaField


class SamplingMethod(str, Enum):
    RANDOM = "random"
    SYSTEMATIC = "systematic"
    TOP = "top"
    BOTTOM = "bottom"
    STRATIFIED = "stratified"
    WEIGHTED = "weighted"
    RESERVOIR = "reservoir"
    CLUSTER = "cluster"


class DataSamplingBlock(Block):
    class Input(BlockSchema):
        data: Union[Dict[str, Any], List[Union[dict, List[Any]]]] = SchemaField(
            description="The dataset to sample from. Can be a single dictionary, a list of dictionaries, or a list of lists.",
            placeholder="{'id': 1, 'value': 'a'} or [{'id': 1, 'value': 'a'}, {'id': 2, 'value': 'b'}, ...]",
        )
        sample_size: int = SchemaField(
            description="The number of samples to take from the dataset.",
            placeholder="10",
            default=10,
        )
        sampling_method: SamplingMethod = SchemaField(
            description="The method to use for sampling.",
            default=SamplingMethod.RANDOM,
        )
        accumulate: bool = SchemaField(
            description="Whether to accumulate data before sampling.",
            default=False,
        )
        random_seed: Optional[int] = SchemaField(
            description="Seed for random number generator (optional).",
            default=None,
        )
        stratify_key: Optional[str] = SchemaField(
            description="Key to use for stratified sampling (required for stratified sampling).",
            default=None,
        )
        weight_key: Optional[str] = SchemaField(
            description="Key to use for weighted sampling (required for weighted sampling).",
            default=None,
        )
        cluster_key: Optional[str] = SchemaField(
            description="Key to use for cluster sampling (required for cluster sampling).",
            default=None,
        )

    class Output(BlockSchema):
        sampled_data: List[Union[dict, List[Any]]] = SchemaField(
            description="The sampled subset of the input data."
        )
        sample_indices: List[int] = SchemaField(
            description="The indices of the sampled data in the original dataset."
        )

    def __init__(self):
        super().__init__(
            id="4a448883-71fa-49cf-91cf-70d793bd7d87",
            description="This block samples data from a given dataset using various sampling methods.",
            categories={BlockCategory.LOGIC},
            input_schema=DataSamplingBlock.Input,
            output_schema=DataSamplingBlock.Output,
            test_input={
                "data": [
                    {"id": i, "value": chr(97 + i), "group": i % 3} for i in range(10)
                ],
                "sample_size": 3,
                "sampling_method": SamplingMethod.STRATIFIED,
                "accumulate": False,
                "random_seed": 42,
                "stratify_key": "group",
            },
            test_output=[
                (
                    "sampled_data",
                    [
                        {"id": 0, "value": "a", "group": 0},
                        {"id": 1, "value": "b", "group": 1},
                        {"id": 8, "value": "i", "group": 2},
                    ],
                ),
                ("sample_indices", [0, 1, 8]),
            ],
        )
        self.accumulated_data = []

    def run(self, input_data: Input) -> BlockOutput:
        if input_data.accumulate:
            if isinstance(input_data.data, dict):
                self.accumulated_data.append(input_data.data)
            elif isinstance(input_data.data, list):
                self.accumulated_data.extend(input_data.data)
            else:
                raise ValueError(f"Unsupported data type: {type(input_data.data)}")

            # If we don't have enough data yet, return without sampling
            if len(self.accumulated_data) < input_data.sample_size:
                return

            data_to_sample = self.accumulated_data
        else:
            # If not accumulating, use the input data directly
            data_to_sample = (
                input_data.data
                if isinstance(input_data.data, list)
                else [input_data.data]
            )

        if input_data.random_seed is not None:
            random.seed(input_data.random_seed)

        data_size = len(data_to_sample)

        if input_data.sample_size > data_size:
            raise ValueError(
                f"Sample size ({input_data.sample_size}) cannot be larger than the dataset size ({data_size})."
            )

        indices = []

        if input_data.sampling_method == SamplingMethod.RANDOM:
            indices = random.sample(range(data_size), input_data.sample_size)
        elif input_data.sampling_method == SamplingMethod.SYSTEMATIC:
            step = data_size // input_data.sample_size
            start = random.randint(0, step - 1)
            indices = list(range(start, data_size, step))[: input_data.sample_size]
        elif input_data.sampling_method == SamplingMethod.TOP:
            indices = list(range(input_data.sample_size))
        elif input_data.sampling_method == SamplingMethod.BOTTOM:
            indices = list(range(data_size - input_data.sample_size, data_size))
        elif input_data.sampling_method == SamplingMethod.STRATIFIED:
            if not input_data.stratify_key:
                raise ValueError(
                    "Stratify key must be provided for stratified sampling."
                )
            strata = defaultdict(list)
            for i, item in enumerate(data_to_sample):
                if isinstance(item, dict):
                    strata_value = item.get(input_data.stratify_key)
                elif hasattr(item, input_data.stratify_key):
                    strata_value = getattr(item, input_data.stratify_key)
                else:
                    raise ValueError(
                        f"Stratify key '{input_data.stratify_key}' not found in item {item}"
                    )

                if strata_value is None:
                    raise ValueError(
                        f"Stratify value for key '{input_data.stratify_key}' is None"
                    )

                strata[str(strata_value)].append(i)

            # Calculate the number of samples to take from each stratum
            stratum_sizes = {
                k: max(1, int(len(v) / data_size * input_data.sample_size))
                for k, v in strata.items()
            }

            # Adjust sizes to ensure we get exactly sample_size samples
            while sum(stratum_sizes.values()) != input_data.sample_size:
                if sum(stratum_sizes.values()) < input_data.sample_size:
                    stratum_sizes[
                        max(stratum_sizes, key=lambda k: stratum_sizes[k])
                    ] += 1
                else:
                    stratum_sizes[
                        max(stratum_sizes, key=lambda k: stratum_sizes[k])
                    ] -= 1

            for stratum, size in stratum_sizes.items():
                indices.extend(random.sample(strata[stratum], size))
        elif input_data.sampling_method == SamplingMethod.WEIGHTED:
            if not input_data.weight_key:
                raise ValueError("Weight key must be provided for weighted sampling.")
            weights = []
            for item in data_to_sample:
                if isinstance(item, dict):
                    weight = item.get(input_data.weight_key)
                elif hasattr(item, input_data.weight_key):
                    weight = getattr(item, input_data.weight_key)
                else:
                    raise ValueError(
                        f"Weight key '{input_data.weight_key}' not found in item {item}"
                    )

                if weight is None:
                    raise ValueError(
                        f"Weight value for key '{input_data.weight_key}' is None"
                    )
                try:
                    weights.append(float(weight))
                except ValueError:
                    raise ValueError(
                        f"Weight value '{weight}' cannot be converted to a number"
                    )

            if not weights:
                raise ValueError(
                    f"No valid weights found using key '{input_data.weight_key}'"
                )

            indices = random.choices(
                range(data_size), weights=weights, k=input_data.sample_size
            )
        elif input_data.sampling_method == SamplingMethod.RESERVOIR:
            indices = list(range(input_data.sample_size))
            for i in range(input_data.sample_size, data_size):
                j = random.randint(0, i)
                if j < input_data.sample_size:
                    indices[j] = i
        elif input_data.sampling_method == SamplingMethod.CLUSTER:
            if not input_data.cluster_key:
                raise ValueError("Cluster key must be provided for cluster sampling.")
            clusters = defaultdict(list)
            for i, item in enumerate(data_to_sample):
                if isinstance(item, dict):
                    cluster_value = item.get(input_data.cluster_key)
                elif hasattr(item, input_data.cluster_key):
                    cluster_value = getattr(item, input_data.cluster_key)
                else:
                    raise TypeError(
                        f"Item {item} does not have the cluster key '{input_data.cluster_key}'"
                    )

                clusters[str(cluster_value)].append(i)

            # Randomly select clusters until we have enough samples
            selected_clusters = []
            while (
                sum(len(clusters[c]) for c in selected_clusters)
                < input_data.sample_size
            ):
                available_clusters = [c for c in clusters if c not in selected_clusters]
                if not available_clusters:
                    break
                selected_clusters.append(random.choice(available_clusters))

            for cluster in selected_clusters:
                indices.extend(clusters[cluster])

            # If we have more samples than needed, randomly remove some
            if len(indices) > input_data.sample_size:
                indices = random.sample(indices, input_data.sample_size)
        else:
            raise ValueError(f"Unknown sampling method: {input_data.sampling_method}")

        sampled_data = [data_to_sample[i] for i in indices]

        # Clear accumulated data after sampling if accumulation is enabled
        if input_data.accumulate:
            self.accumulated_data = []

        yield "sampled_data", sampled_data
        yield "sample_indices", indices
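Of the methods above, reservoir sampling is the least obvious: keep the first k indices, then replace a random slot with decreasing probability, which gives every element an equal chance of selection without needing to know the dataset size up front. The same few lines extracted for clarity:

```python
import random


def reservoir_indices(n: int, k: int) -> list[int]:
    # Identical to the RESERVOIR branch above, pulled out as a standalone function.
    indices = list(range(k))
    for i in range(k, n):
        j = random.randint(0, i)  # inclusive on both ends
        if j < k:
            indices[j] = i
    return indices


random.seed(0)
print(reservoir_indices(n=1000, k=5))  # 5 indices, each element equally likely
```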

rnd/autogpt_server/autogpt_server/data/analytics.py (new file)
@@ -0,0 +1,43 @@
import logging

import prisma.types

logger = logging.getLogger(__name__)


async def log_raw_analytics(
    user_id: str,
    type: str,
    data: dict,
    data_index: str,
):
    details = await prisma.models.AnalyticsDetails.prisma().create(
        data={
            "userId": user_id,
            "type": type,
            "data": prisma.Json(data),
            "dataIndex": data_index,
        }
    )
    return details


async def log_raw_metric(
    user_id: str,
    metric_name: str,
    metric_value: float,
    data_string: str,
):
    if metric_value < 0:
        raise ValueError("metric_value must be non-negative")

    result = await prisma.models.AnalyticsMetrics.prisma().create(
        data={
            "value": metric_value,
            "analyticMetric": metric_name,
            "userId": user_id,
            "dataString": data_string,
        },
    )

    return result

rnd/autogpt_server/autogpt_server/data/credit.py (new file)
@@ -0,0 +1,274 @@
|
||||
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Optional, Type

import prisma.errors
from prisma import Json
from prisma.enums import UserBlockCreditType
from prisma.models import UserBlockCredit
from pydantic import BaseModel

from autogpt_server.blocks.llm import (
    MODEL_METADATA,
    AIConversationBlock,
    AIStructuredResponseGeneratorBlock,
    AITextGeneratorBlock,
    AITextSummarizerBlock,
    LlmModel,
)
from autogpt_server.blocks.talking_head import CreateTalkingAvatarVideoBlock
from autogpt_server.data.block import Block, BlockInput
from autogpt_server.util.settings import Config


class BlockCostType(str, Enum):
    RUN = "run"  # cost X credits per run
    BYTE = "byte"  # cost X credits per byte
    SECOND = "second"  # cost X credits per second


class BlockCost(BaseModel):
    cost_amount: int
    cost_filter: BlockInput
    cost_type: BlockCostType

    def __init__(
        self,
        cost_amount: int,
        cost_type: BlockCostType = BlockCostType.RUN,
        cost_filter: Optional[BlockInput] = None,
        **data: Any,
    ) -> None:
        super().__init__(
            cost_amount=cost_amount,
            cost_filter=cost_filter or {},
            cost_type=cost_type,
            **data,
        )


llm_cost = [
    BlockCost(
        cost_type=BlockCostType.RUN,
        cost_filter={
            "model": model,
            "api_key": None,  # Running LLM with user own API key is free.
        },
        cost_amount=metadata.cost_factor,
    )
    for model, metadata in MODEL_METADATA.items()
] + [
    BlockCost(
        # Default cost is running LlmModel.GPT4O.
        cost_amount=MODEL_METADATA[LlmModel.GPT4O].cost_factor,
        cost_filter={"api_key": None},
    ),
]

BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
    AIConversationBlock: llm_cost,
    AITextGeneratorBlock: llm_cost,
    AIStructuredResponseGeneratorBlock: llm_cost,
    AITextSummarizerBlock: llm_cost,
    CreateTalkingAvatarVideoBlock: [
        BlockCost(cost_amount=15, cost_filter={"api_key": None})
    ],
}


class UserCreditBase(ABC):
    def __init__(self, num_user_credits_refill: int):
        self.num_user_credits_refill = num_user_credits_refill

    @abstractmethod
    async def get_or_refill_credit(self, user_id: str) -> int:
        """
        Get the current credit for the user and refill if no transaction has been made in the current cycle.

        Returns:
            int: The current credit for the user.
        """
        pass

    @abstractmethod
    async def spend_credits(
        self,
        user_id: str,
        user_credit: int,
        block: Block,
        input_data: BlockInput,
        data_size: float,
        run_time: float,
    ) -> int:
        """
        Spend the credits for the user based on the block usage.

        Args:
            user_id (str): The user ID.
            user_credit (int): The current credit for the user.
            block (Block): The block that is being used.
            input_data (BlockInput): The input data for the block.
            data_size (float): The size of the data being processed.
            run_time (float): The time taken to run the block.

        Returns:
            int: amount of credit spent
        """
        pass

    @abstractmethod
    async def top_up_credits(self, user_id: str, amount: int):
        """
        Top up the credits for the user.

        Args:
            user_id (str): The user ID.
            amount (int): The amount to top up.
        """
        pass


class UserCredit(UserCreditBase):
    async def get_or_refill_credit(self, user_id: str) -> int:
        cur_time = self.time_now()
        cur_month = cur_time.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        nxt_month = cur_month.replace(month=cur_month.month + 1)

        user_credit = await UserBlockCredit.prisma().group_by(
            by=["userId"],
            sum={"amount": True},
            where={
                "userId": user_id,
                "createdAt": {"gte": cur_month, "lt": nxt_month},
                "isActive": True,
            },
        )

        if user_credit:
            credit_sum = user_credit[0].get("_sum") or {}
            return credit_sum.get("amount", 0)

        key = f"MONTHLY-CREDIT-TOP-UP-{cur_month}"

        try:
            await UserBlockCredit.prisma().create(
                data={
                    "amount": self.num_user_credits_refill,
                    "type": UserBlockCreditType.TOP_UP,
                    "userId": user_id,
                    "transactionKey": key,
                    "createdAt": self.time_now(),
                }
            )
        except prisma.errors.UniqueViolationError:
            pass  # Already refilled this month

        return self.num_user_credits_refill

    @staticmethod
    def time_now():
        return datetime.now(timezone.utc)

    @staticmethod
    def _block_usage_cost(
        block: Block,
        input_data: BlockInput,
        data_size: float,
        run_time: float,
    ) -> tuple[int, BlockInput]:
        block_costs = BLOCK_COSTS.get(type(block))
        if not block_costs:
            return 0, {}

        for block_cost in block_costs:
            if all(
                # None, [], {}, "", are considered the same value.
                input_data.get(k) == b or (not input_data.get(k) and not b)
                for k, b in block_cost.cost_filter.items()
            ):
                if block_cost.cost_type == BlockCostType.RUN:
                    return block_cost.cost_amount, block_cost.cost_filter

                if block_cost.cost_type == BlockCostType.SECOND:
                    return (
                        int(run_time * block_cost.cost_amount),
                        block_cost.cost_filter,
                    )

                if block_cost.cost_type == BlockCostType.BYTE:
                    return (
                        int(data_size * block_cost.cost_amount),
                        block_cost.cost_filter,
                    )

        return 0, {}

    async def spend_credits(
        self,
        user_id: str,
        user_credit: int,
        block: Block,
        input_data: BlockInput,
        data_size: float,
        run_time: float,
        validate_balance: bool = True,
    ) -> int:
        cost, matching_filter = self._block_usage_cost(
            block=block, input_data=input_data, data_size=data_size, run_time=run_time
        )
        if cost <= 0:
            return 0

        if validate_balance and user_credit < cost:
            raise ValueError(f"Insufficient credit: {user_credit} < {cost}")

        await UserBlockCredit.prisma().create(
            data={
                "userId": user_id,
                "amount": -cost,
                "type": UserBlockCreditType.USAGE,
                "blockId": block.id,
                "metadata": Json(
                    {
                        "block": block.name,
                        "input": matching_filter,
                    }
                ),
                "createdAt": self.time_now(),
            }
        )
        return cost

    async def top_up_credits(self, user_id: str, amount: int):
        await UserBlockCredit.prisma().create(
            data={
                "userId": user_id,
                "amount": amount,
                "type": UserBlockCreditType.TOP_UP,
                "createdAt": self.time_now(),
            }
        )


class DisabledUserCredit(UserCreditBase):
    async def get_or_refill_credit(self, *args, **kwargs) -> int:
        return 0

    async def spend_credits(self, *args, **kwargs) -> int:
        return 0

    async def top_up_credits(self, *args, **kwargs):
        pass


def get_user_credit_model() -> UserCreditBase:
    config = Config()
    if config.enable_credit.lower() == "true":
        return UserCredit(config.num_user_credits_refill)
    else:
        return DisabledUserCredit(0)


def get_block_costs() -> dict[str, list[BlockCost]]:
    return {block().id: costs for block, costs in BLOCK_COSTS.items()}
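A rough sketch of how the executor is expected to drive this module end to end (simplified; block, input_data, and the size/time figures would come from an actual node run):

from autogpt_server.data.credit import get_user_credit_model


async def charge_block_run(user_id: str, block, input_data: dict) -> int:
    credit_model = get_user_credit_model()  # UserCredit, or a DisabledUserCredit no-op
    balance = await credit_model.get_or_refill_credit(user_id)
    # Matches input_data against the BLOCK_COSTS cost_filters; raises ValueError
    # if the matched cost exceeds the current balance.
    return await credit_model.spend_credits(
        user_id, balance, block, input_data, data_size=128.0, run_time=2.5
    )

Note that the filter match in _block_usage_cost treats None, "", [], and {} as equivalent, so a cost entry with "api_key": None also applies when the input simply omits the key.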
@@ -31,7 +31,7 @@ async def connect(call_count=0):
     except Exception as e:
         if call_count <= 5:
             logger.info(f"[Prisma-{conn_id}] Connection failed: {e}. Retrying now..")
-            await asyncio.sleep(call_count)
+            await asyncio.sleep(2**call_count)
             await connect(call_count + 1)
         else:
             raise e
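The change above replaces a linear retry delay with exponential backoff, so repeated connection failures wait 1s, 2s, 4s, 8s... instead of hammering the database. The same pattern in a generic, self-contained form (the helper name and delay cap are illustrative):

import asyncio
import logging

logger = logging.getLogger(__name__)


async def retry_with_backoff(op, max_attempts: int = 5, max_delay: float = 32.0):
    for attempt in range(max_attempts):
        try:
            return await op()
        except Exception as e:
            if attempt == max_attempts - 1:
                raise
            delay = min(2**attempt, max_delay)  # 1, 2, 4, 8, ... seconds
            logger.info(f"Attempt {attempt + 1} failed: {e}. Retrying in {delay}s")
            await asyncio.sleep(delay)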
@@ -1,9 +1,9 @@
 from collections import defaultdict
 from datetime import datetime, timezone
-from enum import Enum
 from multiprocessing import Manager
 from typing import Any, Generic, TypeVar

+from prisma.enums import AgentExecutionStatus
 from prisma.models import (
     AgentGraphExecution,
     AgentNodeExecution,
@@ -21,12 +21,14 @@ from autogpt_server.util import json, mock


 class GraphExecution(BaseModel):
     user_id: str
     graph_exec_id: str
-    start_node_execs: list["NodeExecution"]
+    graph_id: str
+    start_node_execs: list["NodeExecution"]


 class NodeExecution(BaseModel):
     user_id: str
     graph_exec_id: str
+    graph_id: str
     node_exec_id: str
@@ -34,13 +36,7 @@ class NodeExecution(BaseModel):
     data: BlockInput


-class ExecutionStatus(str, Enum):
-    INCOMPLETE = "INCOMPLETE"
-    QUEUED = "QUEUED"
-    RUNNING = "RUNNING"
-    COMPLETED = "COMPLETED"
-    FAILED = "FAILED"
-
+ExecutionStatus = AgentExecutionStatus

 T = TypeVar("T")
@@ -148,6 +144,7 @@ async def create_graph_execution(
         data={
             "agentGraphId": graph_id,
             "agentGraphVersion": graph_version,
+            "executionStatus": ExecutionStatus.QUEUED,
             "AgentNodeExecutions": {
                 "create": [  # type: ignore
                     {
@@ -259,10 +256,20 @@ async def upsert_execution_output(
     )


+async def update_graph_execution_start_time(graph_exec_id: str):
+    await AgentGraphExecution.prisma().update(
+        where={"id": graph_exec_id},
+        data={
+            "executionStatus": ExecutionStatus.RUNNING,
+            "startedAt": datetime.now(tz=timezone.utc),
+        },
+    )
+
+
 async def update_graph_execution_stats(graph_exec_id: str, stats: dict[str, Any]):
     await AgentGraphExecution.prisma().update(
         where={"id": graph_exec_id},
-        data={"stats": json.dumps(stats)},
+        data={"executionStatus": ExecutionStatus.COMPLETED, "stats": json.dumps(stats)},
     )
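Taken together, these helpers move a graph execution through its status lifecycle: created as QUEUED, stamped RUNNING with a start time, then closed out as COMPLETED along with its stats. A simplified sketch of the calling order (assumes a connected Prisma client):

from autogpt_server.data import execution as execution_db


async def run_lifecycle(graph_exec_id: str):
    # Stamp RUNNING and record startedAt before dispatching nodes
    await execution_db.update_graph_execution_start_time(graph_exec_id)
    # ... node executions happen here ...
    # Mark COMPLETED and persist whatever stats the executor collected
    await execution_db.update_graph_execution_stats(graph_exec_id, {"node_count": 3})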
@@ -9,7 +9,7 @@ from prisma.models import AgentGraph, AgentNode, AgentNodeLink
 from pydantic import BaseModel, PrivateAttr
 from pydantic_core import PydanticUndefinedType

-from autogpt_server.blocks.basic import InputBlock, OutputBlock
+from autogpt_server.blocks.basic import AgentInputBlock, AgentOutputBlock
 from autogpt_server.data.block import BlockInput, get_block, get_blocks
 from autogpt_server.data.db import BaseDbModel, transaction
 from autogpt_server.data.user import DEFAULT_USER_ID
@@ -106,7 +106,9 @@ class Graph(GraphMeta):
     def starting_nodes(self) -> list[Node]:
         outbound_nodes = {link.sink_id for link in self.links}
         input_nodes = {
-            v.id for v in self.nodes if isinstance(get_block(v.block_id), InputBlock)
+            v.id
+            for v in self.nodes
+            if isinstance(get_block(v.block_id), AgentInputBlock)
         }
         return [
             node
@@ -116,7 +118,9 @@ class Graph(GraphMeta):

     @property
     def ending_nodes(self) -> list[Node]:
-        return [v for v in self.nodes if isinstance(get_block(v.block_id), OutputBlock)]
+        return [
+            v for v in self.nodes if isinstance(get_block(v.block_id), AgentOutputBlock)
+        ]

     @property
     def subgraph_map(self) -> dict[str, str]:
@@ -179,7 +183,9 @@ class Graph(GraphMeta):
             + [sanitize(link.sink_name) for link in node.input_links]
         )
         for name in block.input_schema.get_required_fields():
-            if name not in provided_inputs and not isinstance(block, InputBlock):
+            if name not in provided_inputs and not isinstance(
+                block, AgentInputBlock
+            ):
                 raise ValueError(
                     f"Node {block.name} #{node.id} required input missing: `{name}`"
                 )
@@ -193,7 +199,7 @@ class Graph(GraphMeta):
         def is_input_output_block(nid: str) -> bool:
             bid = node_map[nid].block_id
             b = get_block(bid)
-            return isinstance(b, InputBlock) or isinstance(b, OutputBlock)
+            return isinstance(b, AgentInputBlock) or isinstance(b, AgentOutputBlock)

         # subgraphs: all nodes in subgraph must be present in the graph.
         for subgraph_id, node_ids in self.subgraphs.items():
rnd/autogpt_server/autogpt_server/exec.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from autogpt_server.app import run_processes
from autogpt_server.executor import ExecutionManager


def main():
    """
    Run all the processes required for the AutoGPT-server REST API.
    """
    run_processes(
        ExecutionManager(),
    )


if __name__ == "__main__":
    main()
@@ -14,11 +14,13 @@ from typing import TYPE_CHECKING, Any, Coroutine, Generator, TypeVar
 if TYPE_CHECKING:
     from autogpt_server.server.rest_api import AgentServer

-from autogpt_server.blocks.basic import InputBlock
+from autogpt_server.blocks.basic import AgentInputBlock
 from autogpt_server.data import db
 from autogpt_server.data.block import Block, BlockData, BlockInput, get_block
+from autogpt_server.data.credit import get_user_credit_model
 from autogpt_server.data.execution import (
     ExecutionQueue,
+    ExecutionResult,
     ExecutionStatus,
     GraphExecution,
     NodeExecution,
@@ -45,21 +47,41 @@ from autogpt_server.util.type import convert
 logger = logging.getLogger(__name__)


-def get_log_metadata(
-    graph_eid: str,
-    graph_id: str,
-    node_eid: str,
-    node_id: str,
-    block_name: str,
-) -> dict:
-    return {
-        "component": "ExecutionManager",
-        "graph_eid": graph_eid,
-        "graph_id": graph_id,
-        "node_eid": node_eid,
-        "node_id": node_id,
-        "block_name": block_name,
-    }
+class LogMetadata:
+    def __init__(
+        self,
+        user_id: str,
+        graph_eid: str,
+        graph_id: str,
+        node_eid: str,
+        node_id: str,
+        block_name: str,
+    ):
+        self.metadata = {
+            "component": "ExecutionManager",
+            "user_id": user_id,
+            "graph_eid": graph_eid,
+            "graph_id": graph_id,
+            "node_eid": node_eid,
+            "node_id": node_id,
+            "block_name": block_name,
+        }
+        self.prefix = f"[ExecutionManager|uid:{user_id}|gid:{graph_id}|nid:{node_id}]|geid:{graph_eid}|nid:{node_eid}|{block_name}]"
+
+    def info(self, msg: str, **extra):
+        logger.info(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def warning(self, msg: str, **extra):
+        logger.warning(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def error(self, msg: str, **extra):
+        logger.error(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def debug(self, msg: str, **extra):
+        logger.debug(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def exception(self, msg: str, **extra):
+        logger.exception(msg, extra={"json_fields": {**self.metadata, **extra}})


 T = TypeVar("T")
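LogMetadata folds the shared structured-logging fields into one object, so call sites shrink from a logger call plus a hand-built extra dict to a single method call. A quick usage sketch with illustrative IDs:

log_metadata = LogMetadata(
    user_id="user-123",
    graph_eid="graph-exec-1",
    graph_id="graph-1",
    node_eid="node-exec-1",
    node_id="node-1",
    block_name="PrintToConsoleBlock",
)
log_metadata.info("Executed node with input", input='{"text": "hello"}')
log_metadata.error("Skip execution, input validation error: missing field")

Each call lands in the underlying logger with the full metadata dict under json_fields, plus any per-call extras.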
@@ -85,6 +107,7 @@ def execute_node(
     Returns:
         The subsequent node to be enqueued, or None if there is no subsequent node.
     """
+    user_id = data.user_id
     graph_exec_id = data.graph_exec_id
     graph_id = data.graph_id
     node_exec_id = data.node_exec_id
@@ -95,9 +118,10 @@ def execute_node(
     def wait(f: Coroutine[Any, Any, T]) -> T:
         return loop.run_until_complete(f)

-    def update_execution(status: ExecutionStatus):
+    def update_execution(status: ExecutionStatus) -> ExecutionResult:
         exec_update = wait(update_execution_status(node_exec_id, status))
         api_client.send_execution_update(exec_update.model_dump())
+        return exec_update

     node = wait(get_node(node_id))

@@ -107,7 +131,8 @@ def execute_node(
         return

     # Sanity check: validate the execution input.
-    log_metadata = get_log_metadata(
+    log_metadata = LogMetadata(
+        user_id=user_id,
         graph_eid=graph_exec_id,
         graph_id=graph_id,
         node_eid=node_exec_id,
@@ -116,29 +141,25 @@ def execute_node(
     )
     input_data, error = validate_exec(node, data.data, resolve_input=False)
     if input_data is None:
-        logger.error(
-            "Skip execution, input validation error",
-            extra={"json_fields": {**log_metadata, "error": error}},
-        )
+        log_metadata.error(f"Skip execution, input validation error: {error}")
         return

     # Execute the node
     input_data_str = json.dumps(input_data)
     input_size = len(input_data_str)
-    logger.info(
-        "Executed node with input",
-        extra={"json_fields": {**log_metadata, "input": input_data_str}},
-    )
+    log_metadata.info("Executed node with input", input=input_data_str)
     update_execution(ExecutionStatus.RUNNING)
+    user_credit = get_user_credit_model()

+    output_size = 0
     try:
+        credit = wait(user_credit.get_or_refill_credit(user_id))
+        if credit < 0:
+            raise ValueError(f"Insufficient credit: {credit}")
+
         for output_name, output_data in node_block.execute(input_data):
-            logger.info(
-                "Node produced output",
-                extra={"json_fields": {**log_metadata, output_name: output_data}},
-            )
+            output_size += len(json.dumps(output_data))
+            log_metadata.info("Node produced output", output_name=output_data)
             wait(upsert_execution_output(node_exec_id, output_name, output_data))

             for execution in _enqueue_next_nodes(
@@ -146,20 +167,25 @@ def execute_node(
                 loop=loop,
                 node=node,
                 output=(output_name, output_data),
+                user_id=user_id,
                 graph_exec_id=graph_exec_id,
                 graph_id=graph_id,
                 log_metadata=log_metadata,
             ):
                 yield execution

-        update_execution(ExecutionStatus.COMPLETED)
+        r = update_execution(ExecutionStatus.COMPLETED)
+        s = input_size + output_size
+        t = (
+            (r.end_time - r.start_time).total_seconds()
+            if r.end_time and r.start_time
+            else 0
+        )
+        wait(user_credit.spend_credits(user_id, credit, node_block, input_data, s, t))

     except Exception as e:
-        error_msg = f"{e.__class__.__name__}: {e}"
-        logger.exception(
-            "Node execution failed with error",
-            extra={"json_fields": {**log_metadata, error: error_msg}},
-        )
+        error_msg = str(e)
+        log_metadata.exception(f"Node execution failed with error {error_msg}")
         wait(upsert_execution_output(node_exec_id, "error", error_msg))
         update_execution(ExecutionStatus.FAILED)
@@ -185,9 +211,10 @@ def _enqueue_next_nodes(
     loop: asyncio.AbstractEventLoop,
     node: Node,
     output: BlockData,
+    user_id: str,
     graph_exec_id: str,
     graph_id: str,
-    log_metadata: dict,
+    log_metadata: LogMetadata,
 ) -> list[NodeExecution]:
     def wait(f: Coroutine[Any, Any, T]) -> T:
         return loop.run_until_complete(f)
@@ -200,6 +227,7 @@ def _enqueue_next_nodes(
         )
         api_client.send_execution_update(exec_update.model_dump())
         return NodeExecution(
+            user_id=user_id,
             graph_exec_id=graph_exec_id,
             graph_id=graph_id,
             node_exec_id=node_exec_id,
@@ -253,17 +281,11 @@ def _enqueue_next_nodes(

             # Incomplete input data, skip queueing the execution.
             if not next_node_input:
-                logger.warning(
-                    f"Skipped queueing {suffix}",
-                    extra={"json_fields": {**log_metadata}},
-                )
+                log_metadata.warning(f"Skipped queueing {suffix}")
                 return enqueued_executions

             # Input is complete, enqueue the execution.
-            logger.info(
-                f"Enqueued {suffix}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Enqueued {suffix}")
             enqueued_executions.append(
                 add_enqueued_execution(next_node_exec_id, next_node_id, next_node_input)
             )
@@ -289,11 +311,9 @@ def _enqueue_next_nodes(
                     idata, msg = validate_exec(next_node, idata)
                     suffix = f"{next_output_name}>{next_input_name}~{ineid}:{msg}"
                     if not idata:
-                        logger.info(
-                            f"{log_metadata} Enqueueing static-link skipped: {suffix}"
-                        )
+                        log_metadata.info(f"Enqueueing static-link skipped: {suffix}")
                         continue
-                    logger.info(f"{log_metadata} Enqueueing static-link execution {suffix}")
+                    log_metadata.info(f"Enqueueing static-link execution {suffix}")
                     enqueued_executions.append(
                         add_enqueued_execution(iexec.node_exec_id, next_node_id, idata)
                     )
@@ -364,7 +384,7 @@ def validate_exec(
 def get_agent_server_client() -> "AgentServer":
     from autogpt_server.server.rest_api import AgentServer

-    return get_service_client(AgentServer)
+    return get_service_client(AgentServer, Config().agent_server_port)


 class Executor:
@@ -434,7 +454,8 @@ class Executor:
     def on_node_execution(
         cls, q: ExecutionQueue[NodeExecution], node_exec: NodeExecution
     ):
-        log_metadata = get_log_metadata(
+        log_metadata = LogMetadata(
+            user_id=node_exec.user_id,
             graph_eid=node_exec.graph_exec_id,
             graph_id=node_exec.graph_id,
             node_eid=node_exec.node_exec_id,
@@ -459,28 +480,19 @@ class Executor:
         cls,
         q: ExecutionQueue[NodeExecution],
         node_exec: NodeExecution,
-        log_metadata: dict,
+        log_metadata: LogMetadata,
         stats: dict[str, Any] | None = None,
     ):
         try:
-            logger.info(
-                f"Start node execution {node_exec.node_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Start node execution {node_exec.node_exec_id}")
             for execution in execute_node(
                 cls.loop, cls.agent_server_client, node_exec, stats
             ):
                 q.add(execution)
-            logger.info(
-                f"Finished node execution {node_exec.node_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Finished node execution {node_exec.node_exec_id}")
         except Exception as e:
-            logger.exception(
-                f"Failed node execution {node_exec.node_exec_id}: {e}",
-                extra={
-                    **log_metadata,
-                },
-            )
+            log_metadata.exception(
+                f"Failed node execution {node_exec.node_exec_id}: {e}"
+            )

     @classmethod
@@ -502,10 +514,12 @@ class Executor:

     @classmethod
     def on_graph_executor_stop(cls):
-        logger.info(
-            f"[on_graph_executor_stop {cls.pid}] ⏳ Terminating node executor pool..."
-        )
+        prefix = f"[on_graph_executor_stop {cls.pid}]"
+        logger.info(f"{prefix} ⏳ Disconnecting DB...")
+        cls.loop.run_until_complete(db.disconnect())
+        logger.info(f"{prefix} ⏳ Terminating node executor pool...")
         cls.executor.terminate()
+        logger.info(f"{prefix} ✅ Finished cleanup")

     @classmethod
     def _init_node_executor_pool(cls):
@@ -517,7 +531,8 @@ class Executor:
     @classmethod
     @error_logged
     def on_graph_execution(cls, graph_exec: GraphExecution, cancel: threading.Event):
-        log_metadata = get_log_metadata(
+        log_metadata = LogMetadata(
+            user_id=graph_exec.user_id,
             graph_eid=graph_exec.graph_exec_id,
             graph_id=graph_exec.graph_id,
             node_id="*",
@@ -542,12 +557,12 @@ class Executor:
     @classmethod
     @time_measured
     def _on_graph_execution(
-        cls, graph_exec: GraphExecution, cancel: threading.Event, log_metadata: dict
+        cls,
+        graph_exec: GraphExecution,
+        cancel: threading.Event,
+        log_metadata: LogMetadata,
     ) -> int:
-        logger.info(
-            f"Start graph execution {graph_exec.graph_exec_id}",
-            extra={"json_fields": {**log_metadata}},
-        )
+        log_metadata.info(f"Start graph execution {graph_exec.graph_exec_id}")
         n_node_executions = 0
         finished = False

@@ -557,10 +572,7 @@ class Executor:
             if finished:
                 return
             cls.executor.terminate()
-            logger.info(
-                f"Terminated graph execution {graph_exec.graph_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Terminated graph execution {graph_exec.graph_exec_id}")
            cls._init_node_executor_pool()

         cancel_thread = threading.Thread(target=cancel_handler)
@@ -598,10 +610,9 @@ class Executor:
                     # Re-enqueueing the data back to the queue will disrupt the order.
                     execution.wait()

-                logger.debug(
+                log_metadata.debug(
                     f"Dispatching node execution {exec_data.node_exec_id} "
-                    f"for node {exec_data.node_id}",
-                    extra={**log_metadata},
+                    f"for node {exec_data.node_id}",
                 )
                 running_executions[exec_data.node_id] = cls.executor.apply_async(
                     cls.on_node_execution,
@@ -611,10 +622,8 @@ class Executor:

                 # Avoid terminating graph execution when some nodes are still running.
                 while queue.empty() and running_executions:
-                    logger.debug(
-                        "Queue empty; running nodes: "
-                        f"{list(running_executions.keys())}",
-                        extra={"json_fields": {**log_metadata}},
-                    )
+                    log_metadata.debug(
+                        f"Queue empty; running nodes: {list(running_executions.keys())}"
+                    )
                     for node_id, execution in list(running_executions.items()):
                         if cancel.is_set():
@@ -623,20 +632,13 @@ class Executor:
                         if not queue.empty():
                             break  # yield to parent loop to execute new queue items

-                        logger.debug(
-                            f"Waiting on execution of node {node_id}",
-                            extra={"json_fields": {**log_metadata}},
-                        )
+                        log_metadata.debug(f"Waiting on execution of node {node_id}")
                         execution.wait(3)

-            logger.info(
-                f"Finished graph execution {graph_exec.graph_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Finished graph execution {graph_exec.graph_exec_id}")
         except Exception as e:
-            logger.exception(
-                f"Failed graph execution {graph_exec.graph_exec_id}: {e}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.exception(
+                f"Failed graph execution {graph_exec.graph_exec_id}: {e}"
+            )
         finally:
             if not cancel.is_set():
@@ -648,6 +650,7 @@ class Executor:

 class ExecutionManager(AppService):
     def __init__(self):
+        super().__init__(port=Config().execution_manager_port)
         self.use_db = True
         self.pool_size = Config().num_graph_workers
         self.queue = ExecutionQueue[GraphExecution]()
@@ -698,7 +701,7 @@ class ExecutionManager(AppService):
         nodes_input = []
         for node in graph.starting_nodes:
             input_data = {}
-            if isinstance(get_block(node.block_id), InputBlock):
+            if isinstance(get_block(node.block_id), AgentInputBlock):
                 name = node.input_default.get("name")
                 if name and name in data:
                     input_data = {"value": data[name]}
@@ -722,6 +725,7 @@ class ExecutionManager(AppService):
         for node_exec in node_execs:
             starting_node_execs.append(
                 NodeExecution(
+                    user_id=user_id,
                     graph_exec_id=node_exec.graph_exec_id,
                     graph_id=node_exec.graph_id,
                     node_exec_id=node_exec.node_exec_id,
@@ -737,6 +741,7 @@ class ExecutionManager(AppService):
         self.agent_server_client.send_execution_update(exec_update.model_dump())

         graph_exec = GraphExecution(
+            user_id=user_id,
             graph_id=graph_id,
             graph_exec_id=graph_exec_id,
             start_node_execs=starting_node_execs,
@@ -9,6 +9,7 @@ from autogpt_server.data import schedule as model
 from autogpt_server.data.block import BlockInput
 from autogpt_server.executor.manager import ExecutionManager
 from autogpt_server.util.service import AppService, expose, get_service_client
+from autogpt_server.util.settings import Config

 logger = logging.getLogger(__name__)

@@ -19,13 +20,15 @@ def log(msg, **kwargs):

 class ExecutionScheduler(AppService):
     def __init__(self, refresh_interval=10):
+        super().__init__(port=Config().execution_scheduler_port)
         self.use_db = True
         self.last_check = datetime.min
         self.refresh_interval = refresh_interval
+        self.use_redis = False

     @property
     def execution_manager_client(self) -> ExecutionManager:
-        return get_service_client(ExecutionManager)
+        return get_service_client(ExecutionManager, Config().execution_manager_port)

     def run_service(self):
         scheduler = BackgroundScheduler()
@@ -0,0 +1,15 @@
from .base import BaseOAuthHandler
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler

HANDLERS_BY_NAME: dict[str, type[BaseOAuthHandler]] = {
    handler.PROVIDER_NAME: handler
    for handler in [
        GitHubOAuthHandler,
        GoogleOAuthHandler,
        NotionOAuthHandler,
    ]
}

__all__ = ["HANDLERS_BY_NAME"]
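The registry maps each handler's PROVIDER_NAME onto its class, so a provider string from the URL resolves to a handler without a chain of ifs. A rough usage sketch (credentials are placeholders):

handler_cls = HANDLERS_BY_NAME["github"]  # raises KeyError for unknown providers
handler = handler_cls(
    client_id="<client-id>",
    client_secret="<client-secret>",
    redirect_uri="https://example.com/oauth/callback",
)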
@@ -5,7 +5,7 @@ from urllib.parse import urlencode
 import requests
 from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials

-from autogpt_server.integrations.oauth import BaseOAuthHandler
+from .base import BaseOAuthHandler


 class GitHubOAuthHandler(BaseOAuthHandler):
@@ -23,6 +23,7 @@ class GitHubOAuthHandler(BaseOAuthHandler):
     """  # noqa

     PROVIDER_NAME = "github"
+    EMAIL_ENDPOINT = "https://api.github.com/user/emails"

     def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
         self.client_id = client_id
@@ -69,10 +70,13 @@ class GitHubOAuthHandler(BaseOAuthHandler):
         response.raise_for_status()
         token_data: dict = response.json()

+        username = self._request_username(token_data["access_token"])
+
         now = int(time.time())
         new_credentials = OAuth2Credentials(
             provider=self.PROVIDER_NAME,
-            title=current_credentials.title if current_credentials else "GitHub",
+            title=current_credentials.title if current_credentials else None,
+            username=username,
             access_token=token_data["access_token"],
             # Token refresh responses have an empty `scope` property (see docs),
             # so we have to get the scope from the existing credentials object.
@@ -97,3 +101,19 @@ class GitHubOAuthHandler(BaseOAuthHandler):
         if current_credentials:
             new_credentials.id = current_credentials.id
         return new_credentials
+
+    def _request_username(self, access_token: str) -> str | None:
+        url = "https://api.github.com/user"
+        headers = {
+            "Accept": "application/vnd.github+json",
+            "Authorization": f"Bearer {access_token}",
+            "X-GitHub-Api-Version": "2022-11-28",
+        }
+
+        response = requests.get(url, headers=headers)
+
+        if not response.ok:
+            return None
+
+        # Get the login (username)
+        return response.json().get("login")
@@ -1,10 +1,13 @@
 from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials
-from google.auth.transport.requests import Request
+from google.auth.external_account_authorized_user import (
+    Credentials as ExternalAccountCredentials,
+)
+from google.auth.transport.requests import AuthorizedSession, Request
 from google.oauth2.credentials import Credentials
 from google_auth_oauthlib.flow import Flow
 from pydantic import SecretStr

-from .oauth import BaseOAuthHandler
+from .base import BaseOAuthHandler


 class GoogleOAuthHandler(BaseOAuthHandler):
@@ -13,6 +16,7 @@ class GoogleOAuthHandler(BaseOAuthHandler):
     """  # noqa

     PROVIDER_NAME = "google"
+    EMAIL_ENDPOINT = "https://www.googleapis.com/oauth2/v2/userinfo"

     def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
         self.client_id = client_id
@@ -37,6 +41,8 @@ class GoogleOAuthHandler(BaseOAuthHandler):
         flow.fetch_token(code=code)

         google_creds = flow.credentials
+        username = self._request_email(google_creds)
+
         # Google's OAuth library is poorly typed so we need some of these:
         assert google_creds.token
         assert google_creds.refresh_token
@@ -44,7 +50,8 @@ class GoogleOAuthHandler(BaseOAuthHandler):
         assert google_creds.scopes
         return OAuth2Credentials(
             provider=self.PROVIDER_NAME,
-            title="Google",
+            title=None,
+            username=username,
             access_token=SecretStr(google_creds.token),
             refresh_token=SecretStr(google_creds.refresh_token),
             access_token_expires_at=int(google_creds.expiry.timestamp()),
@@ -52,6 +59,15 @@ class GoogleOAuthHandler(BaseOAuthHandler):
             scopes=google_creds.scopes,
         )

+    def _request_email(
+        self, creds: Credentials | ExternalAccountCredentials
+    ) -> str | None:
+        session = AuthorizedSession(creds)
+        response = session.get(self.EMAIL_ENDPOINT)
+        if not response.ok:
+            return None
+        return response.json()["email"]
+
     def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
         # Google credentials should ALWAYS have a refresh token
         assert credentials.refresh_token
@@ -72,9 +88,10 @@ class GoogleOAuthHandler(BaseOAuthHandler):
         assert google_creds.expiry

         return OAuth2Credentials(
-            id=credentials.id,
             provider=self.PROVIDER_NAME,
+            id=credentials.id,
             title=credentials.title,
             username=credentials.username,
             access_token=SecretStr(google_creds.token),
             refresh_token=SecretStr(google_creds.refresh_token),
             access_token_expires_at=int(google_creds.expiry.timestamp()),
@@ -4,7 +4,7 @@ from urllib.parse import urlencode
 import requests
 from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials

-from autogpt_server.integrations.oauth import BaseOAuthHandler
+from .base import BaseOAuthHandler


 class NotionOAuthHandler(BaseOAuthHandler):
@@ -49,10 +49,18 @@ class NotionOAuthHandler(BaseOAuthHandler):
         response = requests.post(self.token_url, json=request_body, headers=headers)
         response.raise_for_status()
         token_data = response.json()
+        # Email is only available for non-bot users
+        email = (
+            token_data["owner"]["person"]["email"]
+            if "person" in token_data["owner"]
+            and "email" in token_data["owner"]["person"]
+            else None
+        )

         return OAuth2Credentials(
             provider=self.PROVIDER_NAME,
-            title=token_data.get("workspace_name", "Notion"),
+            title=token_data.get("workspace_name"),
+            username=email,
             access_token=token_data["access_token"],
             refresh_token=None,
             access_token_expires_at=None,  # Notion tokens don't expire
@@ -1,7 +1,6 @@
 from autogpt_server.app import run_processes
-from autogpt_server.executor import ExecutionManager, ExecutionScheduler
+from autogpt_server.executor import ExecutionScheduler
 from autogpt_server.server import AgentServer
 from autogpt_server.util.service import PyroNameServer


 def main():
@@ -9,8 +8,6 @@ def main():
     Run all the processes required for the AutoGPT-server REST API.
     """
     run_processes(
         PyroNameServer(),
-        ExecutionManager(),
         ExecutionScheduler(),
         AgentServer(),
     )
@@ -15,14 +15,16 @@ from autogpt_server.data import execution as execution_db
 from autogpt_server.data import graph as graph_db
 from autogpt_server.data import user as user_db
 from autogpt_server.data.block import BlockInput, CompletedBlockOutput
+from autogpt_server.data.credit import get_block_costs, get_user_credit_model
 from autogpt_server.data.queue import AsyncEventQueue, AsyncRedisEventQueue
 from autogpt_server.data.user import get_or_create_user
 from autogpt_server.executor import ExecutionManager, ExecutionScheduler
 from autogpt_server.server.model import CreateGraph, SetGraphActiveVersion
-from autogpt_server.util.auth import get_user_id
 from autogpt_server.util.lock import KeyedMutex
 from autogpt_server.util.service import AppService, expose, get_service_client
-from autogpt_server.util.settings import Settings
+from autogpt_server.util.settings import Config, Settings
+
+from .utils import get_user_id

 settings = Settings()

@@ -31,8 +33,10 @@ class AgentServer(AppService):
     mutex = KeyedMutex()
     use_redis = True
     _test_dependency_overrides = {}
+    _user_credit_model = get_user_credit_model()

     def __init__(self, event_queue: AsyncEventQueue | None = None):
+        super().__init__(port=Config().agent_server_port)
         self.event_queue = event_queue or AsyncRedisEventQueue()

     @asynccontextmanager
@@ -70,137 +74,184 @@ class AgentServer(AppService):
         )

         # Define the API routes
-        router = APIRouter(prefix="/api")
-        router.dependencies.append(Depends(auth_middleware))
+        api_router = APIRouter(prefix="/api")
+        api_router.dependencies.append(Depends(auth_middleware))

-        router.add_api_route(
+        # Import & Attach sub-routers
+        import autogpt_server.server.routers.analytics
+        import autogpt_server.server.routers.integrations
+
+        api_router.include_router(
+            autogpt_server.server.routers.integrations.router,
+            prefix="/integrations",
+            tags=["integrations"],
+            dependencies=[Depends(auth_middleware)],
+        )
+
+        api_router.include_router(
+            autogpt_server.server.routers.analytics.router,
+            prefix="/analytics",
+            tags=["analytics"],
+            dependencies=[Depends(auth_middleware)],
+        )
+
+        api_router.add_api_route(
             path="/auth/user",
             endpoint=self.get_or_create_user_route,
             methods=["POST"],
             tags=["auth"],
         )

-        router.add_api_route(
+        api_router.add_api_route(
             path="/blocks",
             endpoint=self.get_graph_blocks,
             methods=["GET"],
             tags=["blocks"],
         )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/blocks/{block_id}/execute",
            endpoint=self.execute_graph_block,
            methods=["POST"],
            tags=["blocks"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs",
            endpoint=self.get_graphs,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/templates",
            endpoint=self.get_templates,
            methods=["GET"],
            tags=["templates", "graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs",
            endpoint=self.create_new_graph,
            methods=["POST"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/templates",
            endpoint=self.create_new_template,
            methods=["POST"],
            tags=["templates", "graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}",
            endpoint=self.get_graph,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/templates/{graph_id}",
            endpoint=self.get_template,
            methods=["GET"],
            tags=["templates", "graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}",
            endpoint=self.update_graph,
            methods=["PUT"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/templates/{graph_id}",
            endpoint=self.update_graph,
            methods=["PUT"],
            tags=["templates", "graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/versions",
            endpoint=self.get_graph_all_versions,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/templates/{graph_id}/versions",
            endpoint=self.get_graph_all_versions,
            methods=["GET"],
            tags=["templates", "graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/versions/{version}",
            endpoint=self.get_graph,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/versions/active",
            endpoint=self.set_graph_active_version,
            methods=["PUT"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/input_schema",
            endpoint=self.get_graph_input_schema,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/execute",
            endpoint=self.execute_graph,
            methods=["POST"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/executions",
            endpoint=self.list_graph_runs,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/executions/{graph_exec_id}",
            endpoint=self.get_graph_run_node_execution_results,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/executions/{graph_exec_id}/stop",
            endpoint=self.stop_graph_run,
            methods=["POST"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/schedules",
            endpoint=self.create_schedule,
            methods=["POST"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/{graph_id}/schedules",
            endpoint=self.get_execution_schedules,
            methods=["GET"],
            tags=["graphs"],
        )
-        router.add_api_route(
+        api_router.add_api_route(
            path="/graphs/schedules/{schedule_id}",
            endpoint=self.update_schedule,
            methods=["PUT"],
            tags=["graphs"],
        )
+        api_router.add_api_route(
+            path="/credits",
+            endpoint=self.get_user_credits,
+            methods=["GET"],
+        )

-        router.add_api_route(
+        api_router.add_api_route(
             path="/settings",
             endpoint=self.update_configuration,
             methods=["POST"],
             tags=["settings"],
         )

         app.add_exception_handler(500, self.handle_internal_http_error)

-        app.include_router(router)
+        app.include_router(api_router)

-        uvicorn.run(app, host="0.0.0.0", port=8000, log_config=None)
+        uvicorn.run(app, host="0.0.0.0", port=Config().agent_api_port, log_config=None)

     def set_test_dependency_overrides(self, overrides: dict):
         self._test_dependency_overrides = overrides
@@ -233,11 +284,11 @@ class AgentServer(AppService):

     @property
     def execution_manager_client(self) -> ExecutionManager:
-        return get_service_client(ExecutionManager)
+        return get_service_client(ExecutionManager, Config().execution_manager_port)

     @property
     def execution_scheduler_client(self) -> ExecutionScheduler:
-        return get_service_client(ExecutionScheduler)
+        return get_service_client(ExecutionScheduler, Config().execution_scheduler_port)

     @classmethod
     def handle_internal_http_error(cls, request: Request, exc: Exception):
@@ -256,7 +307,9 @@ class AgentServer(AppService):

     @classmethod
     def get_graph_blocks(cls) -> list[dict[Any, Any]]:
-        return [v.to_dict() for v in block.get_blocks().values()]
+        blocks = block.get_blocks()
+        costs = get_block_costs()
+        return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks.values()]

     @classmethod
     def execute_graph_block(
@@ -474,6 +527,25 @@ class AgentServer(AppService):

         return await execution_db.list_executions(graph_id, graph_version)

+    @classmethod
+    async def get_graph_run_status(
+        cls,
+        graph_id: str,
+        graph_exec_id: str,
+        user_id: Annotated[str, Depends(get_user_id)],
+    ) -> execution_db.ExecutionStatus:
+        graph = await graph_db.get_graph(graph_id, user_id=user_id)
+        if not graph:
+            raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
+
+        execution = await execution_db.get_graph_execution(graph_exec_id, user_id)
+        if not execution:
+            raise HTTPException(
+                status_code=404, detail=f"Execution #{graph_exec_id} not found."
+            )
+
+        return execution.executionStatus
+
     @classmethod
     async def get_graph_run_node_execution_results(
         cls,
@@ -515,6 +587,11 @@ class AgentServer(AppService):
         execution_scheduler.update_schedule(schedule_id, is_enabled, user_id=user_id)
         return {"id": schedule_id}

+    async def get_user_credits(
+        self, user_id: Annotated[str, Depends(get_user_id)]
+    ) -> dict[str, int]:
+        return {"credits": await self._user_credit_model.get_or_refill_credit(user_id)}
+
     def get_execution_schedules(
         self, graph_id: str, user_id: Annotated[str, Depends(get_user_id)]
     ) -> dict[str, str]:
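With the server running, the new credits route behaves like any other authenticated endpoint; a rough sketch of a call (host, port, and token are placeholders):

import requests

resp = requests.get(
    "http://localhost:8000/api/credits",
    headers={"Authorization": "Bearer <jwt>"},
)
print(resp.json())  # e.g. {"credits": 1000}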
@@ -0,0 +1,49 @@
"""Analytics API"""

from typing import Annotated

import fastapi

import autogpt_server.data.analytics
from autogpt_server.server.utils import get_user_id

router = fastapi.APIRouter()


@router.post(path="/log_raw_metric")
async def log_raw_metric(
    user_id: Annotated[str, fastapi.Depends(get_user_id)],
    metric_name: Annotated[str, fastapi.Body(..., embed=True)],
    metric_value: Annotated[float, fastapi.Body(..., embed=True)],
    data_string: Annotated[str, fastapi.Body(..., embed=True)],
):
    result = await autogpt_server.data.analytics.log_raw_metric(
        user_id=user_id,
        metric_name=metric_name,
        metric_value=metric_value,
        data_string=data_string,
    )
    return result.id


@router.post("/log_raw_analytics")
async def log_raw_analytics(
    user_id: Annotated[str, fastapi.Depends(get_user_id)],
    type: Annotated[str, fastapi.Body(..., embed=True)],
    data: Annotated[
        dict,
        fastapi.Body(..., embed=True, description="The data to log"),
    ],
    data_index: Annotated[
        str,
        fastapi.Body(
            ...,
            embed=True,
            description="Indexable field for any count based analytical measures like page order clicking, tutorial step completion, etc.",
        ),
    ],
):
    result = await autogpt_server.data.analytics.log_raw_analytics(
        user_id, type, data, data_index
    )
    return result.id
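Because every body parameter is declared with embed=True, the request body must wrap each value under its parameter name. A sketch of a matching request (base URL and token are placeholders):

import requests

requests.post(
    "http://localhost:8000/api/analytics/log_raw_metric",
    headers={"Authorization": "Bearer <jwt>"},
    json={
        "metric_name": "page_load_time",
        "metric_value": 1.25,
        "data_string": "/build",
    },
)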
rnd/autogpt_server/autogpt_server/server/routers/integrations.py (new file, 152 lines)
@@ -0,0 +1,152 @@
import logging
from typing import Annotated, Literal

from autogpt_libs.supabase_integration_credentials_store import (
    SupabaseIntegrationCredentialsStore,
)
from autogpt_libs.supabase_integration_credentials_store.types import (
    Credentials,
    OAuth2Credentials,
)
from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request
from pydantic import BaseModel
from supabase import Client

from autogpt_server.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler
from autogpt_server.util.settings import Settings

from ..utils import get_supabase, get_user_id

logger = logging.getLogger(__name__)
settings = Settings()
router = APIRouter()


def get_store(supabase: Client = Depends(get_supabase)):
    return SupabaseIntegrationCredentialsStore(supabase)


class LoginResponse(BaseModel):
    login_url: str


@router.get("/{provider}/login")
async def login(
    provider: Annotated[str, Path(title="The provider to initiate an OAuth flow for")],
    user_id: Annotated[str, Depends(get_user_id)],
    request: Request,
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
    scopes: Annotated[
        str, Query(title="Comma-separated list of authorization scopes")
    ] = "",
) -> LoginResponse:
    handler = _get_provider_oauth_handler(request, provider)

    # Generate and store a secure random state token
    state = await store.store_state_token(user_id, provider)

    requested_scopes = scopes.split(",") if scopes else []
    login_url = handler.get_login_url(requested_scopes, state)

    return LoginResponse(login_url=login_url)


class CredentialsMetaResponse(BaseModel):
    id: str
    type: Literal["oauth2", "api_key"]
    title: str | None
    scopes: list[str] | None
    username: str | None


@router.post("/{provider}/callback")
async def callback(
    provider: Annotated[str, Path(title="The target provider for this OAuth exchange")],
    code: Annotated[str, Body(title="Authorization code acquired by user login")],
    state_token: Annotated[str, Body(title="Anti-CSRF nonce")],
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
    user_id: Annotated[str, Depends(get_user_id)],
    request: Request,
) -> CredentialsMetaResponse:
    handler = _get_provider_oauth_handler(request, provider)

    # Verify the state token
    if not await store.verify_state_token(user_id, state_token, provider):
        raise HTTPException(status_code=400, detail="Invalid or expired state token")

    try:
        credentials = handler.exchange_code_for_tokens(code)
    except Exception as e:
        logger.warning(f"Code->Token exchange failed for provider {provider}: {e}")
        raise HTTPException(status_code=400, detail=str(e))

    # TODO: Allow specifying `title` to set on `credentials`
    store.add_creds(user_id, credentials)
    return CredentialsMetaResponse(
        id=credentials.id,
        type=credentials.type,
        title=credentials.title,
        scopes=credentials.scopes,
        username=credentials.username,
    )


@router.get("/{provider}/credentials")
async def list_credentials(
    provider: Annotated[str, Path(title="The provider to list credentials for")],
    user_id: Annotated[str, Depends(get_user_id)],
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
) -> list[CredentialsMetaResponse]:
    credentials = store.get_creds_by_provider(user_id, provider)
    return [
        CredentialsMetaResponse(
            id=cred.id,
            type=cred.type,
            title=cred.title,
            scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
            username=cred.username if isinstance(cred, OAuth2Credentials) else None,
        )
        for cred in credentials
    ]


@router.get("/{provider}/credentials/{cred_id}")
async def get_credential(
    provider: Annotated[str, Path(title="The provider to retrieve credentials for")],
    cred_id: Annotated[str, Path(title="The ID of the credentials to retrieve")],
    user_id: Annotated[str, Depends(get_user_id)],
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
) -> Credentials:
    credential = store.get_creds_by_id(user_id, cred_id)
    if not credential:
        raise HTTPException(status_code=404, detail="Credentials not found")
    if credential.provider != provider:
        raise HTTPException(
            status_code=404, detail="Credentials do not match the specified provider"
        )
    return credential


# -------- UTILITIES --------- #


def _get_provider_oauth_handler(req: Request, provider_name: str) -> BaseOAuthHandler:
    if provider_name not in HANDLERS_BY_NAME:
        raise HTTPException(
            status_code=404, detail=f"Unknown provider '{provider_name}'"
        )

    client_id = getattr(settings.secrets, f"{provider_name}_client_id")
    client_secret = getattr(settings.secrets, f"{provider_name}_client_secret")
    if not (client_id and client_secret):
        raise HTTPException(
            status_code=501,
            detail=f"Integration with provider '{provider_name}' is not configured",
        )

    handler_class = HANDLERS_BY_NAME[provider_name]
    return handler_class(
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=str(req.url_for("callback", provider=provider_name)),
    )
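End to end, a client drives these routes in two steps: fetch a provider login URL (the anti-CSRF state token is stored server-side), then post the authorization code back. A rough sketch with placeholder values:

import requests

BASE = "http://localhost:8000/api/integrations"  # hypothetical host/port
HEADERS = {"Authorization": "Bearer <jwt>"}

# 1. Get a provider login URL and send the user there
login = requests.get(f"{BASE}/github/login", headers=HEADERS, params={"scopes": "repo"})
print(login.json()["login_url"])

# 2. After the provider redirects back, exchange code + state for stored credentials
cb = requests.post(
    f"{BASE}/github/callback",
    headers=HEADERS,
    json={"code": "<authorization-code>", "state_token": "<state>"},
)
print(cb.json())  # CredentialsMetaResponse: id, type, title, scopes, username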
@@ -1,7 +1,11 @@
-from autogpt_libs.auth import auth_middleware
+from autogpt_libs.auth.middleware import auth_middleware
 from fastapi import Depends, HTTPException
+from supabase import Client, create_client

 from autogpt_server.data.user import DEFAULT_USER_ID
+from autogpt_server.util.settings import Settings
+
+settings = Settings()


 def get_user_id(payload: dict = Depends(auth_middleware)) -> str:
@@ -13,3 +17,7 @@ def get_user_id(payload: dict = Depends(auth_middleware)) -> str:
     if not user_id:
         raise HTTPException(status_code=401, detail="User ID not found in token")
     return user_id
+
+
+def get_supabase() -> Client:
+    return create_client(settings.secrets.supabase_url, settings.secrets.supabase_key)
@@ -11,7 +11,7 @@ from autogpt_server.data.user import DEFAULT_USER_ID
 from autogpt_server.server.conn_manager import ConnectionManager
 from autogpt_server.server.model import ExecutionSubscription, Methods, WsMessage
 from autogpt_server.util.service import AppProcess
-from autogpt_server.util.settings import Settings
+from autogpt_server.util.settings import Config, Settings

 logger = logging.getLogger(__name__)
 settings = Settings()
@@ -174,4 +174,4 @@ async def websocket_router(

 class WebsocketServer(AppProcess):
     def run(self):
-        uvicorn.run(app, host="0.0.0.0", port=8001)
+        uvicorn.run(app, host="0.0.0.0", port=Config().websocket_server_port)
@@ -252,7 +252,6 @@ Here are a couple of sample of the Block class implementation:
|
||||
|
||||
async def block_autogen_agent():
|
||||
async with SpinTestServer() as server:
|
||||
test_manager = server.exec_manager
|
||||
test_user = await create_test_user()
|
||||
test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
|
||||
input_data = {"input": "Write me a block that writes a string into a file."}
|
||||
@@ -261,10 +260,8 @@ async def block_autogen_agent():
|
||||
)
|
||||
print(response)
|
||||
result = await wait_execution(
|
||||
exec_manager=test_manager,
|
||||
graph_id=test_graph.id,
|
||||
graph_exec_id=response["id"],
|
||||
num_execs=10,
|
||||
timeout=1200,
|
||||
user_id=test_user.id,
|
||||
)
|
||||
|
||||
@@ -153,7 +153,6 @@ async def create_test_user() -> User:
|
||||
|
||||
async def reddit_marketing_agent():
|
||||
async with SpinTestServer() as server:
|
||||
exec_man = server.exec_manager
|
||||
test_user = await create_test_user()
|
||||
test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
|
||||
input_data = {"subreddit": "AutoGPT"}
|
||||
@@ -161,9 +160,7 @@ async def reddit_marketing_agent():
|
||||
test_graph.id, input_data, test_user.id
|
||||
)
|
||||
print(response)
|
||||
result = await wait_execution(
|
||||
exec_man, test_user.id, test_graph.id, response["id"], 13, 120
|
||||
)
|
||||
result = await wait_execution(test_user.id, test_graph.id, response["id"], 120)
|
||||
print(result)
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from prisma.models import User
|
||||
|
||||
from autogpt_server.blocks.basic import InputBlock, PrintToConsoleBlock
|
||||
from autogpt_server.blocks.basic import AgentInputBlock, PrintToConsoleBlock
|
||||
from autogpt_server.blocks.text import FillTextTemplateBlock
|
||||
from autogpt_server.data import graph
|
||||
from autogpt_server.data.graph import create_graph
|
||||
@@ -28,22 +28,12 @@ def create_test_graph() -> graph.Graph:
     """
     nodes = [
         graph.Node(
-            block_id=InputBlock().id,
-            input_default={
-                "name": "input_1",
-                "description": "First input value",
-                "placeholder_values": [],
-                "limit_to_placeholder_values": False,
-            },
+            block_id=AgentInputBlock().id,
+            input_default={"name": "input_1"},
         ),
         graph.Node(
-            block_id=InputBlock().id,
-            input_default={
-                "name": "input_2",
-                "description": "Second input value",
-                "placeholder_values": [],
-                "limit_to_placeholder_values": False,
-            },
+            block_id=AgentInputBlock().id,
+            input_default={"name": "input_2"},
         ),
         graph.Node(
             block_id=FillTextTemplateBlock().id,
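
With InputBlock renamed to AgentInputBlock, the per-node metadata (description, placeholder_values, limit_to_placeholder_values) is no longer required; only name remains. A small hypothetical helper (not part of the diff) can keep multi-input test graphs terse:

from autogpt_server.blocks.basic import AgentInputBlock
from autogpt_server.data import graph

def make_input_node(name: str) -> graph.Node:
    # Hypothetical convenience wrapper around the new, slimmer input node.
    return graph.Node(
        block_id=AgentInputBlock().id,
        input_default={"name": name},
    )

nodes = [make_input_node("input_1"), make_input_node("input_2")]
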
@@ -85,7 +75,6 @@ def create_test_graph() -> graph.Graph:

 async def sample_agent():
     async with SpinTestServer() as server:
-        exec_man = server.exec_manager
         test_user = await create_test_user()
         test_graph = await create_graph(create_test_graph(), test_user.id)
         input_data = {"input_1": "Hello", "input_2": "World"}
@@ -93,9 +82,7 @@ async def sample_agent():
         test_graph.id, input_data, test_user.id
     )
     print(response)
-    result = await wait_execution(
-        exec_man, test_user.id, test_graph.id, response["id"], 4, 10
-    )
+    result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
     print(result)

@@ -2,7 +2,7 @@ import functools
 import logging
 import os
 import time
-from typing import Callable, Tuple, TypeVar
+from typing import Callable, ParamSpec, Tuple, TypeVar

 from pydantic import BaseModel

@@ -24,18 +24,19 @@ def _end_measurement(
     return end_wall_time - start_wall_time, end_cpu_time - start_cpu_time


+P = ParamSpec("P")
 T = TypeVar("T")

 logger = logging.getLogger(__name__)


-def time_measured(func: Callable[..., T]) -> Callable[..., Tuple[TimingInfo, T]]:
+def time_measured(func: Callable[P, T]) -> Callable[P, Tuple[TimingInfo, T]]:
     """
     Decorator to measure the time taken by a function to execute.
     """

     @functools.wraps(func)
-    def wrapper(*args, **kwargs) -> Tuple[TimingInfo, T]:
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> Tuple[TimingInfo, T]:
         start_wall_time, start_cpu_time = _start_measurement()
         try:
             result = func(*args, **kwargs)
@@ -49,13 +50,13 @@ def time_measured(func: Callable[..., T]) -> Callable[..., Tuple[TimingInfo, T]]
     return wrapper


-def error_logged(func: Callable[..., T]) -> Callable[..., T | None]:
+def error_logged(func: Callable[P, T]) -> Callable[P, T | None]:
     """
     Decorator to suppress and log any exceptions raised by a function.
     """

     @functools.wraps(func)
-    def wrapper(*args, **kwargs) -> T | None:
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | None:
         try:
             return func(*args, **kwargs)
         except Exception as e:
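
The typing change above swaps Callable[..., T] for ParamSpec, so decorated functions keep their exact parameter types instead of degrading to untyped *args. A self-contained illustration of the same pattern (toy decorator, not from the codebase; requires Python 3.10+ for typing.ParamSpec):

import functools
from typing import Callable, ParamSpec, Tuple, TypeVar

P = ParamSpec("P")
T = TypeVar("T")

def with_label(label: str) -> Callable[[Callable[P, T]], Callable[P, Tuple[str, T]]]:
    # Tags the wrapped function's result while preserving its signature
    # for static checkers, just like time_measured/error_logged above.
    def decorator(func: Callable[P, T]) -> Callable[P, Tuple[str, T]]:
        @functools.wraps(func)
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Tuple[str, T]:
            return label, func(*args, **kwargs)
        return wrapper
    return decorator

@with_label("sum")
def add(a: int, b: int) -> int:
    return a + b

# add(1, 2) == ("sum", 3); add("1", 2) is now flagged by mypy/pyright.
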
@@ -1,17 +1,15 @@
 import os

-from forge.logging.config import LogFormatName
-

 def configure_logging():
     import logging

-    from forge.logging import configure_logging
+    import autogpt_libs.logging.config

     if os.getenv("APP_ENV") != "cloud":
-        configure_logging()
+        autogpt_libs.logging.config.configure_logging(force_cloud_logging=False)
     else:
-        configure_logging(log_format=LogFormatName.STRUCTURED)
+        autogpt_libs.logging.config.configure_logging(force_cloud_logging=True)

     # Silence httpx logger
     logging.getLogger("httpx").setLevel(logging.WARNING)
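
The logging setup drops the forge dependency in favour of autogpt_libs, with the APP_ENV check deciding between local and cloud formatting. The snippet below sketches the same env-toggle pattern using only the standard library; it illustrates the idea and is not the autogpt_libs implementation:

import logging
import os

def configure_logging_example() -> None:
    # Structured (JSON-ish) records for cloud runs, plain text locally.
    cloud = os.getenv("APP_ENV") == "cloud"
    fmt = (
        '{"time": "%(asctime)s", "level": "%(levelname)s", "msg": "%(message)s"}'
        if cloud
        else "%(asctime)s %(levelname)s %(message)s"
    )
    logging.basicConfig(level=logging.INFO, format=fmt)
    logging.getLogger("httpx").setLevel(logging.WARNING)
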
@@ -1,12 +1,13 @@
 import asyncio
 import logging
+import os
 import threading
 import time
 from abc import abstractmethod
 from typing import Any, Callable, Coroutine, Type, TypeVar, cast

+import Pyro5.api
 from Pyro5 import api as pyro
-from Pyro5 import nameserver

 from autogpt_server.data import db
 from autogpt_server.data.queue import AsyncEventQueue, AsyncRedisEventQueue
@@ -42,25 +43,16 @@ def expose(func: C) -> C:
     return pyro.expose(wrapper)  # type: ignore


-class PyroNameServer(AppProcess):
-    def run(self):
-        nameserver.start_ns_loop(host=pyro_host, port=9090)
-
-    @conn_retry
-    def _wait_for_ns(self):
-        pyro.locate_ns(host="localhost", port=9090)
-
-    def health_check(self):
-        self._wait_for_ns()
-        logger.info(f"{__class__.__name__} is ready")
-
-
 class AppService(AppProcess):
     shared_event_loop: asyncio.AbstractEventLoop
     event_queue: AsyncEventQueue = AsyncRedisEventQueue()
     use_db: bool = False
     use_redis: bool = False

+    def __init__(self, port):
+        self.port = port
+        self.uri = None
+
     @classmethod
     @property
     def service_name(cls) -> str:
@@ -108,11 +100,10 @@ class AppService(AppProcess):

     @conn_retry
     def __start_pyro(self):
-        daemon = pyro.Daemon(host=pyro_host)
-        ns = pyro.locate_ns(host=pyro_host, port=9090)
-        uri = daemon.register(self)
-        ns.register(self.service_name, uri)
-        logger.info(f"[{self.service_name}] Connected to Pyro; URI = {uri}")
+        host = Config().pyro_host
+        daemon = Pyro5.api.Daemon(host=host, port=self.port)
+        self.uri = daemon.register(self, objectId=self.service_name)
+        logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}")
         daemon.requestLoop()

     def __start_async_loop(self):
@@ -122,16 +113,19 @@ class AppService(AppProcess):
 AS = TypeVar("AS", bound=AppService)


-def get_service_client(service_type: Type[AS]) -> AS:
+def get_service_client(service_type: Type[AS], port: int) -> AS:
     service_name = service_type.service_name

     class DynamicClient:
         @conn_retry
         def __init__(self):
-            ns = pyro.locate_ns()
-            uri = ns.lookup(service_name)
-            self.proxy = pyro.Proxy(uri)
+            host = os.environ.get(f"{service_name.upper()}_HOST", "localhost")
+            uri = f"PYRO:{service_type.service_name}@{host}:{port}"
+            logger.debug(f"Connecting to service [{service_name}]. URI = {uri}")
+            self.proxy = Pyro5.api.Proxy(uri)
+            # Attempt to bind to ensure the connection is established
+            self.proxy._pyroBind()
+            logger.debug(f"Successfully connected to service [{service_name}]")

         def __getattr__(self, name: str) -> Callable[..., Any]:
             return getattr(self.proxy, name)
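
Net effect of the service changes: the Pyro nameserver (and the PyroNameServer process) goes away. Each service registers under a well-known objectId on its own fixed port, and clients assemble the PYRO URI directly from an env-derived host plus that port. A condensed, self-contained sketch of both halves; the service name and port below are placeholders, not values from the diff:

import Pyro5.api

SERVICE_NAME = "ExecutionManager"  # placeholder objectId
PORT = 8002                        # placeholder port

class Echo:
    @Pyro5.api.expose
    def ping(self, msg: str) -> str:
        return msg

def serve() -> None:
    # Server side: fixed objectId on a fixed port; no nameserver involved.
    daemon = Pyro5.api.Daemon(host="0.0.0.0", port=PORT)
    daemon.register(Echo(), objectId=SERVICE_NAME)
    daemon.requestLoop()

def connect(host: str = "localhost") -> Pyro5.api.Proxy:
    # Client side: build the URI directly, as DynamicClient now does.
    proxy = Pyro5.api.Proxy(f"PYRO:{SERVICE_NAME}@{host}:{PORT}")
    proxy._pyroBind()  # fail fast if the service is not up yet
    return proxy
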
Some files were not shown because too many files have changed in this diff.