Merge branch 'redesigning-block-menu' into kpczerwinski/secrt-1320-backend-update

This commit is contained in:
Krzysztof Czerwinski
2025-06-06 14:43:17 +02:00
96 changed files with 18564 additions and 13498 deletions

View File

@@ -27,7 +27,7 @@
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/yarn.lock
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config

View File

@@ -10,17 +10,19 @@ updates:
commit-message:
prefix: "chore(libs/deps)"
prefix-development: "chore(libs/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# backend (Poetry project)
- package-ecosystem: "pip"
@@ -32,17 +34,19 @@ updates:
commit-message:
prefix: "chore(backend/deps)"
prefix-development: "chore(backend/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# frontend (Next.js project)
- package-ecosystem: "npm"
@@ -58,13 +62,13 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# infra (Terraform)
- package-ecosystem: "terraform"
@@ -81,14 +85,13 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# GitHub Actions
- package-ecosystem: "github-actions"
@@ -101,14 +104,13 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Docker
- package-ecosystem: "docker"
@@ -121,16 +123,16 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Docs
- package-ecosystem: 'pip'
- package-ecosystem: "pip"
directory: "docs/"
schedule:
interval: "weekly"
@@ -142,10 +144,10 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"

View File

@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
python-version: ["3.11"]
runs-on: ubuntu-latest
services:
@@ -81,12 +81,12 @@ jobs:
- name: Install Poetry (Unix)
run: |
# Extract Poetry version from backend/poetry.lock
HEAD_POETRY_VERSION=$(head -n 1 poetry.lock | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
if [ -n "$BASE_REF" ]; then
BASE_BRANCH=${BASE_REF/refs\/heads\//}
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | head -n 1 | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
else

View File

@@ -0,0 +1,198 @@
# Dispatches deploy/undeploy events for per-PR dev environments in the
# AutoGPT_cloud_infrastructure repo. Triggered by "!deploy"/"!undeploy"
# PR comments, and by PR close (to tear down any still-active deployment).
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher

on:
  pull_request:
    types: [closed]
  issue_comment:
    types: [created]

permissions:
  issues: write
  pull-requests: write

jobs:
  dispatch:
    runs-on: ubuntu-latest
    steps:
      # Parses the comment and decides which command (if any) to act on.
      # Outputs: comment_body, has_permission, permission_denied,
      # should_deploy, should_undeploy.
      # NOTE(review): commentUser/prAuthor are computed but unused below —
      # possibly leftover from an author-permission check; confirm before removing.
      - name: Check comment permissions and deployment status
        id: check_status
        # issue_comment also fires for plain issues; the pull_request guard
        # restricts this to comments made on PRs.
        if: github.event_name == 'issue_comment' && github.event.issue.pull_request
        uses: actions/github-script@v7
        with:
          script: |
            const commentBody = context.payload.comment.body.trim();
            const commentUser = context.payload.comment.user.login;
            const prAuthor = context.payload.issue.user.login;
            const authorAssociation = context.payload.comment.author_association;
            // Check permissions
            const hasPermission = (
              authorAssociation === 'OWNER' ||
              authorAssociation === 'MEMBER' ||
              authorAssociation === 'COLLABORATOR'
            );
            core.setOutput('comment_body', commentBody);
            core.setOutput('has_permission', hasPermission);
            if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
              core.setOutput('permission_denied', 'true');
              return;
            }
            if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
              return;
            }
            // Process deploy command
            if (commentBody === '!deploy') {
              core.setOutput('should_deploy', 'true');
            }
            // Process undeploy command
            else if (commentBody === '!undeploy') {
              core.setOutput('should_undeploy', 'true');
            }

      - name: Post permission denied comment
        if: steps.check_status.outputs.permission_denied == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
            });

      # Fetches PR metadata needed for the dispatch payloads below.
      - name: Get PR details for deployment
        id: pr_details
        if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            const pr = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: context.issue.number
            });
            core.setOutput('pr_number', pr.data.number);
            core.setOutput('pr_title', pr.data.title);
            core.setOutput('pr_state', pr.data.state);

      # Sends a repository_dispatch to the infrastructure repo, which
      # performs the actual deployment.
      - name: Dispatch Deploy Event
        if: steps.check_status.outputs.should_deploy == 'true'
        uses: peter-evans/repository-dispatch@v2
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "deploy",
              "pr_number": "${{ steps.pr_details.outputs.pr_number }}",
              "pr_title": "${{ steps.pr_details.outputs.pr_title }}",
              "pr_state": "${{ steps.pr_details.outputs.pr_state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post deploy success comment
        if: steps.check_status.outputs.should_deploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
            });

      - name: Dispatch Undeploy Event (from comment)
        if: steps.check_status.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v2
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "undeploy",
              "pr_number": "${{ steps.pr_details.outputs.pr_number }}",
              "pr_title": "${{ steps.pr_details.outputs.pr_title }}",
              "pr_state": "${{ steps.pr_details.outputs.pr_state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post undeploy success comment
        if: steps.check_status.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
            });

      # On PR close, scans the comment history: if the most recent command
      # was "!deploy" (no later "!undeploy"), the environment is still live
      # and must be cleaned up.
      # NOTE(review): listComments returns at most one page (30 comments) —
      # commands beyond that may be missed; confirm whether pagination is needed.
      - name: Check deployment status on PR close
        id: check_pr_close
        if: github.event_name == 'pull_request' && github.event.action == 'closed'
        uses: actions/github-script@v7
        with:
          script: |
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });
            let lastDeployIndex = -1;
            let lastUndeployIndex = -1;
            comments.data.forEach((comment, index) => {
              if (comment.body.trim() === '!deploy') {
                lastDeployIndex = index;
              } else if (comment.body.trim() === '!undeploy') {
                lastUndeployIndex = index;
              }
            });
            // Should undeploy if there's a !deploy without a subsequent !undeploy
            const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
            core.setOutput('should_undeploy', shouldUndeploy);

      - name: Dispatch Undeploy Event (PR closed with active deployment)
        if: >-
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v2
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "undeploy",
              "pr_number": "${{ github.event.pull_request.number }}",
              "pr_title": "${{ github.event.pull_request.title }}",
              "pr_state": "${{ github.event.pull_request.state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post PR close undeploy comment
        if: >-
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
            });

View File

@@ -1,57 +0,0 @@
name: Dev Deploy PR Event Dispatcher
on:
pull_request:
types: [opened, synchronize, closed]
issue_comment:
types: [created]
jobs:
dispatch:
runs-on: ubuntu-latest
steps:
- name: Check if should dispatch
id: check
if: >-
github.event.issue.pull_request &&
github.event.comment.body == '!deploy' &&
(
github.event.comment.user.login == github.event.issue.user.login ||
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR'
)
run: |
echo "should_dispatch=true" >> $GITHUB_OUTPUT
- name: Dispatch PR Event
if: steps.check.outputs.should_dispatch == 'true'
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Dispatch PR Closure Event
if: github.event.action == 'closed' && contains(github.event.pull_request.comments.*.body, '!deploy')
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}

View File

@@ -29,13 +29,14 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Install dependencies
run: |
yarn install --frozen-lockfile
run: pnpm install --frozen-lockfile
- name: Run lint
run: |
yarn lint
run: pnpm lint
type-check:
runs-on: ubuntu-latest
@@ -48,13 +49,14 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Install dependencies
run: |
yarn install --frozen-lockfile
run: pnpm install --frozen-lockfile
- name: Run tsc check
run: |
yarn type-check
run: pnpm type-check
test:
runs-on: ubuntu-latest
@@ -74,6 +76,9 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
@@ -93,25 +98,24 @@ jobs:
docker compose -f ../docker-compose.yml up -d
- name: Install dependencies
run: |
yarn install --frozen-lockfile
run: pnpm install --frozen-lockfile
- name: Setup Builder .env
run: |
cp .env.example .env
- name: Setup .env
run: cp .env.example .env
- name: Build frontend
run: pnpm build --turbo
# uses Turbopack, much faster and safe enough for a test pipeline
- name: Install Browser '${{ matrix.browser }}'
run: yarn playwright install --with-deps ${{ matrix.browser }}
run: pnpm playwright install --with-deps ${{ matrix.browser }}
- name: Run tests
timeout-minutes: 20
run: |
yarn test --project=${{ matrix.browser }}
- name: Run Playwright tests
run: pnpm test:no-build --project=${{ matrix.browser }}
- name: Print Final Docker Compose logs
if: always()
run: |
docker compose -f ../docker-compose.yml logs
run: docker compose -f ../docker-compose.yml logs
- uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}

View File

@@ -0,0 +1,60 @@
#!/usr/bin/env python3
import sys
if sys.version_info < (3, 11):
print("Python version 3.11 or higher required")
sys.exit(1)
import tomllib
def get_package_version(package_name: str, lockfile_path: str) -> str | None:
"""Extract package version from poetry.lock file."""
try:
if lockfile_path == "-":
data = tomllib.load(sys.stdin.buffer)
else:
with open(lockfile_path, "rb") as f:
data = tomllib.load(f)
except FileNotFoundError:
print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
sys.exit(1)
except tomllib.TOMLDecodeError as e:
print(f"Error parsing TOML file: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error reading file: {e}", file=sys.stderr)
sys.exit(1)
# Look for the package in the packages list
packages = data.get("package", [])
for package in packages:
if package.get("name", "").lower() == package_name.lower():
return package.get("version")
return None
def main():
if len(sys.argv) not in (2, 3):
print(
"Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
" cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
file=sys.stderr,
)
sys.exit(1)
package_name = sys.argv[1]
lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"
version = get_package_version(package_name, lockfile_path)
if version:
print(version)
else:
print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -235,7 +235,7 @@ repos:
hooks:
- id: tsc
name: Typecheck - AutoGPT Platform - Frontend
entry: bash -c 'cd autogpt_platform/frontend && npm run type-check'
entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
files: ^autogpt_platform/frontend/
types: [file]
language: system

View File

@@ -3,6 +3,7 @@
This guide provides context for Codex when updating the **autogpt_platform** folder.
## Directory overview
- `autogpt_platform/backend` FastAPI based backend service.
- `autogpt_platform/autogpt_libs` Shared Python libraries.
- `autogpt_platform/frontend` Next.js + Typescript frontend.
@@ -11,12 +12,14 @@ This guide provides context for Codex when updating the **autogpt_platform** fol
See `docs/content/platform/getting-started.md` for setup instructions.
## Code style
- Format Python code with `poetry run format`.
- Format frontend code using `yarn format`.
- Format frontend code using `pnpm format`.
## Testing
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `yarn test` or `yarn test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
@@ -38,6 +41,7 @@ Use conventional commit messages for all commits (e.g. `feat(backend): add API`)
- blocks
## Pull requests
- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting
- Fill out the **Changes** section and the checklist.
@@ -47,4 +51,3 @@ Use conventional commit messages for all commits (e.g. `feat(backend): add API`)
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the linear ticket branch structure if given codex/open-1668-resume-dropped-runs

View File

@@ -15,8 +15,35 @@
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
### System Requirements
Before proceeding with the installation, ensure your system meets the following requirements:
#### Hardware Requirements
- CPU: 4+ cores recommended
- RAM: Minimum 8GB, 16GB recommended
- Storage: At least 10GB of free space
#### Software Requirements
- Operating Systems:
- Linux (Ubuntu 20.04 or newer recommended)
- macOS (10.15 or newer)
- Windows 10/11 with WSL2
- Required Software (with minimum versions):
- Docker Engine (20.10.0 or newer)
- Docker Compose (2.0.0 or newer)
- Git (2.30 or newer)
- Node.js (16.x or newer)
- npm (8.x or newer)
- VSCode (1.60 or newer) or any modern code editor
#### Network Requirements
- Stable internet connection
- Access to required ports (will be configured in Docker)
- Ability to make outbound HTTPS connections
### Updated Setup Instructions:
Weve moved to a fully maintained and regularly updated documentation site.
We've moved to a fully maintained and regularly updated documentation site.
👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
@@ -152,7 +179,7 @@ Just clone the repo, install dependencies with `./run setup`, and you should be
[![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasnt created an issue for the same topic.
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.
## 🤝 Sister projects

View File

@@ -15,44 +15,57 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
```
git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
cd AutoGPT/autogpt_platform
```
2. Run the following command:
```
cp .env.example .env
```
This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
3. Run the following command:
```
docker compose up -d
```
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
4. Navigate to `frontend` within the `autogpt_platform` directory:
```
cd frontend
```
You will need to run your frontend application separately on your local machine.
5. Run the following command:
5. Run the following command:
```
cp .env.example .env.local
```
This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
6. Run the following command:
Enable corepack and install dependencies by running:
```
npm install
npm run dev
corepack enable
pnpm i
```
This command will install the necessary dependencies and start the frontend application in development mode.
If you are using Yarn, you can run the following commands instead:
Then start the frontend application in development mode:
```
yarn install && yarn dev
pnpm dev
```
7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
@@ -68,43 +81,52 @@ Here are some useful Docker Compose commands for managing your AutoGPT Platform:
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.
### Sample Scenarios
Here are some common scenarios where you might use multiple Docker Compose commands:
1. Updating and restarting a specific service:
```
docker compose build api_srv
docker compose up -d --no-deps api_srv
```
This rebuilds the `api_srv` service and restarts it without affecting other services.
2. Viewing logs for troubleshooting:
```
docker compose logs -f api_srv ws_srv
```
This shows and follows the logs for both `api_srv` and `ws_srv` services.
3. Scaling a service for increased load:
```
docker compose up -d --scale executor=3
```
This scales the `executor` service to 3 instances to handle increased load.
4. Stopping the entire system for maintenance:
```
docker compose stop
docker compose rm -f
docker compose pull
docker compose up -d
```
This stops all services, removes containers, pulls the latest images, and restarts the system.
5. Developing with live updates:
```
docker compose watch
```
This watches for changes in your code and automatically updates the relevant services.
6. Checking the status of services:
@@ -115,7 +137,6 @@ Here are some common scenarios where you might use multiple Docker Compose comma
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
### Persisting Data
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:

View File

@@ -0,0 +1,174 @@
"""Image-editing block backed by BlackForest Labs' Flux Kontext models on Replicate."""
from enum import Enum
from typing import Literal, Optional

from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.file import MediaFileType

# Mock credentials used only by the block's built-in test harness.
TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="replicate",
    api_key=SecretStr("mock-replicate-api-key"),
    title="Mock Replicate API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    # Fix: "title" was previously (incorrectly) assigned TEST_CREDENTIALS.type.
    "title": TEST_CREDENTIALS.title,
}


class FluxKontextModelName(str, Enum):
    """Flux Kontext model variants selectable in the block UI."""

    PRO = "Flux Kontext Pro"
    MAX = "Flux Kontext Max"

    @property
    def api_name(self) -> str:
        """Replicate model slug, e.g. ``black-forest-labs/flux-kontext-pro``."""
        return f"black-forest-labs/flux-kontext-{self.name.lower()}"


class AspectRatio(str, Enum):
    """Aspect ratios accepted by the Flux Kontext API."""

    MATCH_INPUT_IMAGE = "match_input_image"
    ASPECT_1_1 = "1:1"
    ASPECT_16_9 = "16:9"
    ASPECT_9_16 = "9:16"
    ASPECT_4_3 = "4:3"
    ASPECT_3_4 = "3:4"
    ASPECT_3_2 = "3:2"
    ASPECT_2_3 = "2:3"
    ASPECT_4_5 = "4:5"
    ASPECT_5_4 = "5:4"
    ASPECT_21_9 = "21:9"
    ASPECT_9_21 = "9:21"
    ASPECT_2_1 = "2:1"
    ASPECT_1_2 = "1:2"


class AIImageEditorBlock(Block):
    """Edit images with BlackForest Labs' Flux Kontext models via Replicate.

    Takes a text prompt and an optional reference image, and yields the URL
    of the edited image on the ``output_image`` pin.
    """

    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.REPLICATE], Literal["api_key"]
        ] = CredentialsField(
            description="Replicate API key with permissions for Flux Kontext models",
        )
        prompt: str = SchemaField(
            description="Text instruction describing the desired edit",
            title="Prompt",
        )
        input_image: Optional[MediaFileType] = SchemaField(
            description="Reference image URI (jpeg, png, gif, webp)",
            default=None,
            title="Input Image",
        )
        aspect_ratio: AspectRatio = SchemaField(
            description="Aspect ratio of the generated image",
            default=AspectRatio.MATCH_INPUT_IMAGE,
            title="Aspect Ratio",
            advanced=False,
        )
        seed: Optional[int] = SchemaField(
            description="Random seed. Set for reproducible generation",
            default=None,
            title="Seed",
            advanced=True,
        )
        model: FluxKontextModelName = SchemaField(
            description="Model variant to use",
            default=FluxKontextModelName.PRO,
            title="Model",
        )

    class Output(BlockSchema):
        output_image: MediaFileType = SchemaField(
            description="URL of the transformed image"
        )
        error: str = SchemaField(description="Error message if generation failed")

    def __init__(self):
        super().__init__(
            id="3fd9c73d-4370-4925-a1ff-1b86b99fabfa",
            description=(
                "Edit images using BlackForest Labs' Flux Kontext models. Provide a prompt "
                "and optional reference image to generate a modified image."
            ),
            categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
            input_schema=AIImageEditorBlock.Input,
            output_schema=AIImageEditorBlock.Output,
            test_input={
                "prompt": "Add a hat to the cat",
                "input_image": "https://example.com/cat.png",
                "aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
                "seed": None,
                "model": FluxKontextModelName.PRO,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("output_image", "https://replicate.com/output/edited-image.png"),
            ],
            test_mock={
                "run_model": lambda *args, **kwargs: "https://replicate.com/output/edited-image.png",
            },
            test_credentials=TEST_CREDENTIALS,
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **kwargs,
    ) -> BlockOutput:
        """Execute the edit and yield ``("output_image", url)``.

        Errors from the Replicate call propagate to the framework, which is
        responsible for routing them to the ``error`` output pin.
        """
        result = self.run_model(
            api_key=credentials.api_key,
            model_name=input_data.model.api_name,
            prompt=input_data.prompt,
            input_image=input_data.input_image,
            aspect_ratio=input_data.aspect_ratio.value,
            seed=input_data.seed,
        )
        yield "output_image", result

    def run_model(
        self,
        api_key: SecretStr,
        model_name: str,
        prompt: str,
        input_image: Optional[MediaFileType],
        aspect_ratio: str,
        seed: Optional[int],
    ) -> MediaFileType:
        """Call the Replicate API and normalise the result to a single image URL.

        Raises:
            ValueError: If Replicate returns no usable output.
        """
        client = ReplicateClient(api_token=api_key.get_secret_value())
        input_params = {
            "prompt": prompt,
            "input_image": input_image,
            "aspect_ratio": aspect_ratio,
            # Only pass a seed when the caller explicitly set one.
            **({"seed": seed} if seed is not None else {}),
        }
        output: FileOutput | list[FileOutput] = client.run(  # type: ignore
            model_name,
            input=input_params,
            wait=False,
        )
        # Replicate may return a list of outputs; use the first one.
        if isinstance(output, list) and output:
            output = output[0]
        if isinstance(output, FileOutput):
            return MediaFileType(output.url)
        if isinstance(output, str):
            return MediaFileType(output)
        raise ValueError("No output received")

View File

@@ -1,12 +1,19 @@
import json
import logging
from enum import Enum
from typing import Any
from io import BufferedReader
from pathlib import Path
from requests.exceptions import HTTPError, RequestException
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.file import (
MediaFileType,
get_exec_file_path,
get_mime_type,
store_media_file,
)
from backend.util.request import requests
logger = logging.getLogger(name=__name__)
@@ -38,13 +45,21 @@ class SendWebRequestBlock(Block):
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
description="If true, send the body as JSON (unless files are also present).",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
body: dict | None = SchemaField(
description="Form/JSON body payload. If files are supplied, this must be a mapping of formfields.",
default=None,
)
files_name: str = SchemaField(
description="The name of the file field in the form data.",
default="file",
)
files: list[MediaFileType] = SchemaField(
description="Mapping of *form field name* → Image url / path / base64 url.",
default_factory=list,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
@@ -55,67 +70,112 @@ class SendWebRequestBlock(Block):
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
description="Make an HTTP request (JSON / form / multipart).",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
@staticmethod
def _prepare_files(
graph_exec_id: str,
files_name: str,
files: list[MediaFileType],
) -> tuple[list[tuple[str, tuple[str, BufferedReader, str]]], list[BufferedReader]]:
"""Convert the `files` mapping into the structure expected by `requests`.
Returns a tuple of (**files_payload**, **open_handles**) so we can close handles later.
"""
files_payload: list[tuple[str, tuple[str, BufferedReader, str]]] = []
open_handles: list[BufferedReader] = []
for media in files:
# Normalise to a list so we can repeat the same key
rel_path = store_media_file(graph_exec_id, media, return_content=False)
abs_path = get_exec_file_path(graph_exec_id, rel_path)
try:
handle = open(abs_path, "rb")
except Exception as e:
for h in open_handles:
try:
h.close()
except Exception:
pass
raise RuntimeError(f"Failed to open file '{abs_path}': {e}") from e
open_handles.append(handle)
mime = get_mime_type(abs_path)
files_payload.append((files_name, (Path(abs_path).name, handle, mime)))
return files_payload, open_handles
def run(self, input_data: Input, *, graph_exec_id: str, **kwargs) -> BlockOutput:
# ─── Parse/normalise body ────────────────────────────────────
body = input_data.body
if isinstance(body, str):
try:
body = json.loads(body)
except json.JSONDecodeError:
# plain text treat as formfield value instead
input_data.json_format = False
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
# ─── Prepare files (if any) ──────────────────────────────────
use_files = bool(input_data.files)
files_payload: list[tuple[str, tuple[str, BufferedReader, str]]] = []
open_handles: list[BufferedReader] = []
if use_files:
files_payload, open_handles = self._prepare_files(
graph_exec_id, input_data.files_name, input_data.files
)
# Enforce body format rules
if use_files and input_data.json_format:
raise ValueError(
"json_format=True cannot be combined with file uploads; set json_format=False and put form fields in `body`."
)
# ─── Execute request ─────────────────────────────────────────
try:
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=body if input_data.json_format else None,
files=files_payload if use_files else None,
# * If files → multipart ⇒ pass formfields via data=
data=body if not input_data.json_format else None,
# * Else, choose JSON vs urlencoded based on flag
json=body if (input_data.json_format and not use_files) else None,
)
if input_data.json_format:
if response.status_code == 204 or not response.content.strip():
result = None
else:
result = response.json()
# Decide how to parse the response
if input_data.json_format or response.headers.get(
"content-type", ""
).startswith("application/json"):
result = (
None
if (response.status_code == 204 or not response.content.strip())
else response.json()
)
else:
result = response.text
yield "response", result
# Yield according to status code bucket
if 200 <= response.status_code < 300:
yield "response", result
elif 400 <= response.status_code < 500:
yield "client_error", result
else:
yield "server_error", result
except HTTPError as e:
# Handle error responses
try:
result = e.response.json() if input_data.json_format else str(e)
except json.JSONDecodeError:
result = str(e)
if 400 <= e.response.status_code < 500:
yield "client_error", result
elif 500 <= e.response.status_code < 600:
yield "server_error", result
else:
error_msg = (
"Unexpected status code "
f"{e.response.status_code} '{e.response.reason}'"
)
logger.warning(error_msg)
yield "error", error_msg
yield "error", f"HTTP error: {str(e)}"
except RequestException as e:
# Handle other request-related exceptions
yield "error", str(e)
yield "error", f"Request error: {str(e)}"
except Exception as e:
# Catch any other unexpected exceptions
yield "error", str(e)
finally:
for h in open_handles:
try:
h.close()
except Exception:
pass

View File

@@ -2,6 +2,7 @@ from typing import Type
from backend.blocks.ai_music_generator import AIMusicGeneratorBlock
from backend.blocks.ai_shortform_video_block import AIShortformVideoCreatorBlock
from backend.blocks.flux_kontext import AIImageEditorBlock, FluxKontextModelName
from backend.blocks.ideogram import IdeogramModelBlock
from backend.blocks.jina.embeddings import JinaEmbeddingBlock
from backend.blocks.jina.search import ExtractWebsiteContentBlock, SearchTheWebBlock
@@ -260,6 +261,30 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
},
)
],
AIImageEditorBlock: [
BlockCost(
cost_amount=10,
cost_filter={
"model": FluxKontextModelName.PRO.api_name,
"credentials": {
"id": replicate_credentials.id,
"provider": replicate_credentials.provider,
"type": replicate_credentials.type,
},
},
),
BlockCost(
cost_amount=20,
cost_filter={
"model": FluxKontextModelName.MAX.api_name,
"credentials": {
"id": replicate_credentials.id,
"provider": replicate_credentials.provider,
"type": replicate_credentials.type,
},
},
),
],
AIMusicGeneratorBlock: [
BlockCost(
cost_amount=11,

View File

@@ -12,6 +12,7 @@ from prisma.types import (
AgentGraphWhereInput,
AgentNodeCreateInput,
AgentNodeLinkCreateInput,
StoreListingVersionWhereInput,
)
from pydantic import create_model
from pydantic.fields import computed_field
@@ -712,23 +713,24 @@ async def get_graph(
include=AGENT_GRAPH_INCLUDE,
order={"version": "desc"},
)
# For access, the graph must be owned by the user or listed in the store
if graph is None or (
graph.userId != user_id
and not (
await StoreListingVersion.prisma().find_first(
where={
"agentGraphId": graph_id,
"agentGraphVersion": version or graph.version,
"isDeleted": False,
"submissionStatus": SubmissionStatus.APPROVED,
}
)
)
):
if graph is None:
return None
if graph.userId != user_id:
store_listing_filter: StoreListingVersionWhereInput = {
"agentGraphId": graph_id,
"isDeleted": False,
"submissionStatus": SubmissionStatus.APPROVED,
}
if version is not None:
store_listing_filter["agentGraphVersion"] = version
# For access, the graph must be owned by the user or listed in the store
if not await StoreListingVersion.prisma().find_first(
where=store_listing_filter, order={"agentGraphVersion": "desc"}
):
return None
if include_subgraphs or for_export:
sub_graphs = await get_sub_graphs(graph)
return GraphModel.from_db(

View File

@@ -305,6 +305,13 @@ def _enqueue_next_nodes(
)
def register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:
try:
return _register_next_executions(node_link)
except Exception as e:
log_metadata.exception(f"Failed to register next executions: {e}")
return []
def _register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:
enqueued_executions = []
next_output_name = node_link.source_name
next_input_name = node_link.sink_name

View File

@@ -174,68 +174,195 @@ def _is_cost_filter_match(cost_filter: BlockInput, input_data: BlockInput) -> bo
# ============ Execution Input Helpers ============ #
# --------------------------------------------------------------------------- #
# Delimiters
# --------------------------------------------------------------------------- #
LIST_SPLIT = "_$_"
DICT_SPLIT = "_#_"
OBJC_SPLIT = "_@_"
_DELIMS = (LIST_SPLIT, DICT_SPLIT, OBJC_SPLIT)
# --------------------------------------------------------------------------- #
# Tokenisation utilities
# --------------------------------------------------------------------------- #
def _next_delim(s: str) -> tuple[str | None, int]:
"""
Return the *earliest* delimiter appearing in `s` and its index.
If none present → (None, -1).
"""
first: str | None = None
pos = len(s) # sentinel: larger than any real index
for d in _DELIMS:
i = s.find(d)
if 0 <= i < pos:
first, pos = d, i
return first, (pos if first else -1)
def _tokenise(path: str) -> list[tuple[str, str]] | None:
"""
Convert the raw path string (starting with a delimiter) into
[ (delimiter, identifier), … ] or None if the syntax is malformed.
"""
tokens: list[tuple[str, str]] = []
while path:
# 1. Which delimiter starts this chunk?
delim = next((d for d in _DELIMS if path.startswith(d)), None)
if delim is None:
return None # invalid syntax
# 2. Slice off the delimiter, then up to the next delimiter (or EOS)
path = path[len(delim) :]
nxt_delim, pos = _next_delim(path)
token, path = (
path[: pos if pos != -1 else len(path)],
path[pos if pos != -1 else len(path) :],
)
if token == "":
return None # empty identifier is invalid
tokens.append((delim, token))
return tokens
# --------------------------------------------------------------------------- #
# Public API parsing (flattened ➜ concrete)
# --------------------------------------------------------------------------- #
def parse_execution_output(output: BlockData, name: str) -> Any | None:
"""
Extracts partial output data by name from a given BlockData.
Retrieve a nested value out of `output` using the flattened *name*.
The function supports extracting data from lists, dictionaries, and objects
using specific naming conventions:
- For lists: <output_name>_$_<index>
- For dictionaries: <output_name>_#_<key>
- For objects: <output_name>_@_<attribute>
Args:
output (BlockData): A tuple containing the output name and data.
name (str): The name used to extract specific data from the output.
Returns:
Any | None: The extracted data if found, otherwise None.
Examples:
>>> output = ("result", [10, 20, 30])
>>> parse_execution_output(output, "result_$_1")
20
>>> output = ("config", {"key1": "value1", "key2": "value2"})
>>> parse_execution_output(output, "config_#_key1")
'value1'
>>> class Sample:
... attr1 = "value1"
... attr2 = "value2"
>>> output = ("object", Sample())
>>> parse_execution_output(output, "object_@_attr1")
'value1'
On any failure (wrong name, wrong type, out-of-range, bad path)
returns **None**.
"""
output_name, output_data = output
base_name, data = output
if name == output_name:
return output_data
# Exact match → whole object
if name == base_name:
return data
if name.startswith(f"{output_name}{LIST_SPLIT}"):
index = int(name.split(LIST_SPLIT)[1])
if not isinstance(output_data, list) or len(output_data) <= index:
return None
return output_data[int(name.split(LIST_SPLIT)[1])]
# Must start with the expected name
if not name.startswith(base_name):
return None
path = name[len(base_name) :]
if not path:
return None # nothing left to parse
if name.startswith(f"{output_name}{DICT_SPLIT}"):
index = name.split(DICT_SPLIT)[1]
if not isinstance(output_data, dict) or index not in output_data:
return None
return output_data[index]
if name.startswith(f"{output_name}{OBJC_SPLIT}"):
index = name.split(OBJC_SPLIT)[1]
if isinstance(output_data, object) and hasattr(output_data, index):
return getattr(output_data, index)
tokens = _tokenise(path)
if tokens is None:
return None
return None
cur: Any = data
for delim, ident in tokens:
if delim == LIST_SPLIT:
# list[index]
try:
idx = int(ident)
except ValueError:
return None
if not isinstance(cur, list) or idx >= len(cur):
return None
cur = cur[idx]
elif delim == DICT_SPLIT:
if not isinstance(cur, dict) or ident not in cur:
return None
cur = cur[ident]
elif delim == OBJC_SPLIT:
if not hasattr(cur, ident):
return None
cur = getattr(cur, ident)
else:
return None # unreachable
return cur
def _assign(container: Any, tokens: list[tuple[str, str]], value: Any) -> Any:
"""
Recursive helper that *returns* the (possibly new) container with
`value` assigned along the remaining `tokens` path.
"""
if not tokens:
return value # leaf reached
delim, ident = tokens[0]
rest = tokens[1:]
# ---------- list ----------
if delim == LIST_SPLIT:
try:
idx = int(ident)
except ValueError:
raise ValueError("index must be an integer")
if container is None:
container = []
elif not isinstance(container, list):
container = list(container) if hasattr(container, "__iter__") else []
while len(container) <= idx:
container.append(None)
container[idx] = _assign(container[idx], rest, value)
return container
# ---------- dict ----------
if delim == DICT_SPLIT:
if container is None:
container = {}
elif not isinstance(container, dict):
container = dict(container) if hasattr(container, "items") else {}
container[ident] = _assign(container.get(ident), rest, value)
return container
# ---------- object ----------
if delim == OBJC_SPLIT:
if container is None or not isinstance(container, MockObject):
container = MockObject()
setattr(
container,
ident,
_assign(getattr(container, ident, None), rest, value),
)
return container
return value # unreachable
def merge_execution_input(data: BlockInput) -> BlockInput:
"""
Reconstruct nested objects from a *flattened* dict of key → value.
Raises ValueError on syntactically invalid list indices.
"""
merged: BlockInput = {}
for key, value in data.items():
# Split off the base name (before the first delimiter, if any)
delim, pos = _next_delim(key)
if delim is None:
merged[key] = value
continue
base, path = key[:pos], key[pos:]
tokens = _tokenise(path)
if tokens is None:
# Invalid key; treat as scalar under the raw name
merged[key] = value
continue
merged[base] = _assign(merged.get(base), tokens, value)
data.update(merged)
return data
def validate_exec(
@@ -292,77 +419,6 @@ def validate_exec(
return data, node_block.name
def merge_execution_input(data: BlockInput) -> BlockInput:
"""
Merges dynamic input pins into a single list, dictionary, or object based on naming patterns.
This function processes input keys that follow specific patterns to merge them into a unified structure:
- `<input_name>_$_<index>` for list inputs.
- `<input_name>_#_<index>` for dictionary inputs.
- `<input_name>_@_<index>` for object inputs.
Args:
data (BlockInput): A dictionary containing input keys and their corresponding values.
Returns:
BlockInput: A dictionary with merged inputs.
Raises:
ValueError: If a list index is not an integer.
Examples:
>>> data = {
... "list_$_0": "a",
... "list_$_1": "b",
... "dict_#_key1": "value1",
... "dict_#_key2": "value2",
... "object_@_attr1": "value1",
... "object_@_attr2": "value2"
... }
>>> merge_execution_input(data)
{
"list": ["a", "b"],
"dict": {"key1": "value1", "key2": "value2"},
"object": <MockObject attr1="value1" attr2="value2">
}
"""
# Merge all input with <input_name>_$_<index> into a single list.
items = list(data.items())
for key, value in items:
if LIST_SPLIT not in key:
continue
name, index = key.split(LIST_SPLIT)
if not index.isdigit():
raise ValueError(f"Invalid key: {key}, #{index} index must be an integer.")
data[name] = data.get(name, [])
if int(index) >= len(data[name]):
# Pad list with empty string on missing indices.
data[name].extend([""] * (int(index) - len(data[name]) + 1))
data[name][int(index)] = value
# Merge all input with <input_name>_#_<index> into a single dict.
for key, value in items:
if DICT_SPLIT not in key:
continue
name, index = key.split(DICT_SPLIT)
data[name] = data.get(name, {})
data[name][index] = value
# Merge all input with <input_name>_@_<index> into a single object.
for key, value in items:
if OBJC_SPLIT not in key:
continue
name, index = key.split(OBJC_SPLIT)
if name not in data or not isinstance(data[name], object):
data[name] = MockObject()
setattr(data[name], index, value)
return data
def _validate_node_input_credentials(
graph: GraphModel,
user_id: str,

View File

@@ -67,8 +67,7 @@ def store_media_file(
return ext if ext else ".bin"
def _file_to_data_uri(path: Path) -> str:
mime_type, _ = mimetypes.guess_type(path)
mime_type = mime_type or "application/octet-stream"
mime_type = get_mime_type(str(path))
b64 = base64.b64encode(path.read_bytes()).decode("utf-8")
return f"data:{mime_type};base64,{b64}"
@@ -130,3 +129,21 @@ def store_media_file(
return MediaFileType(_file_to_data_uri(target_path))
else:
return MediaFileType(_strip_base_prefix(target_path, base_path))
def get_mime_type(file: str) -> str:
"""
Get the MIME type of a file, whether it's a data URI, URL, or local path.
"""
if file.startswith("data:"):
match = re.match(r"^data:([^;]+);base64,", file)
return match.group(1) if match else "application/octet-stream"
elif file.startswith(("http://", "https://")):
parsed_url = urlparse(file)
mime_type, _ = mimetypes.guess_type(parsed_url.path)
return mime_type or "application/octet-stream"
else:
mime_type, _ = mimetypes.guess_type(file)
return mime_type or "application/octet-stream"

View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "aio-pika"
@@ -329,7 +329,7 @@ description = "Backport of CPython tarfile module"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version < \"3.12\""
markers = "python_version <= \"3.11\""
files = [
{file = "backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34"},
{file = "backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991"},
@@ -1021,7 +1021,7 @@ files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
markers = {dev = "python_version == \"3.10\""}
markers = {dev = "python_version < \"3.11\""}
[package.extras]
test = ["pytest (>=6)"]
@@ -3293,14 +3293,14 @@ poetry-plugin = ["poetry (>=1.2.0,<3.0.0) ; python_version < \"4.0\""]
[[package]]
name = "poetry"
version = "2.1.3"
version = "2.1.1"
description = "Python dependency management and packaging made easy."
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "poetry-2.1.3-py3-none-any.whl", hash = "sha256:7054d3f97ccce7f31961ead16250407c4577bfe57e2037a190ae2913fc40a20c"},
{file = "poetry-2.1.3.tar.gz", hash = "sha256:f2c9bd6790b19475976d88ea4553bcc3533c0dc73f740edc4fffe9e2add50594"},
{file = "poetry-2.1.1-py3-none-any.whl", hash = "sha256:1d433880bd5b401327ddee789ccfe9ff197bf3b0cd240f0bc7cc99c84d14b16c"},
{file = "poetry-2.1.1.tar.gz", hash = "sha256:d82673865bf13d6cd0dacf28c69a89670456d8df2f9e5da82bfb5f833ba00efc"},
]
[package.dependencies]
@@ -3316,7 +3316,7 @@ packaging = ">=24.0"
pbs-installer = {version = ">=2025.1.6,<2026.0.0", extras = ["download", "install"]}
pkginfo = ">=1.12,<2.0"
platformdirs = ">=3.0.0,<5"
poetry-core = "2.1.3"
poetry-core = "2.1.1"
pyproject-hooks = ">=1.0.0,<2.0.0"
requests = ">=2.26,<3.0"
requests-toolbelt = ">=1.0.0,<2.0.0"
@@ -3329,14 +3329,14 @@ xattr = {version = ">=1.0.0,<2.0.0", markers = "sys_platform == \"darwin\""}
[[package]]
name = "poetry-core"
version = "2.1.3"
version = "2.1.1"
description = "Poetry PEP 517 Build Backend"
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "poetry_core-2.1.3-py3-none-any.whl", hash = "sha256:2c704f05016698a54ca1d327f46ce2426d72eaca6ff614132c8477c292266771"},
{file = "poetry_core-2.1.3.tar.gz", hash = "sha256:0522a015477ed622c89aad56a477a57813cace0c8e7ff2a2906b7ef4a2e296a4"},
{file = "poetry_core-2.1.1-py3-none-any.whl", hash = "sha256:bc3b0382ab4d00d5d780277fd0aad1580eb4403613b37fc60fec407b5bee1fe6"},
{file = "poetry_core-2.1.1.tar.gz", hash = "sha256:c1a1f6f00e4254742f40988a8caf665549101cf9991122cd5de1198897768b1a"},
]
[[package]]
@@ -5274,7 +5274,7 @@ description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
markers = "python_version == \"3.10\""
markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -5522,7 +5522,7 @@ description = "Fast implementation of asyncio event loop on top of libuv"
optional = false
python-versions = ">=3.8.0"
groups = ["main"]
markers = "platform_python_implementation != \"PyPy\" and sys_platform != \"win32\" and sys_platform != \"cygwin\""
markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\""
files = [
{file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"},
{file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"},
@@ -6264,4 +6264,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
content-hash = "f0d0aae83b885e97413e0effe5f61dd24d50b6ff77f243d855053e7588877f35"
content-hash = "8968eaab1359ef97beccfc7796d69557e0eeb9286c69cfdc7441c483b91ae58a"

View File

@@ -37,7 +37,7 @@ ollama = "^0.4.8"
openai = "^1.78.1"
pika = "^1.3.2"
pinecone = "^5.3.1"
poetry = "^2.1.3"
poetry = "2.1.1" # CHECK DEPENDABOT SUPPORT BEFORE UPGRADING
postmarker = "^1.0"
praw = "~7.8.1"
prisma = "^0.15.0"

View File

@@ -1,55 +1,278 @@
from typing import cast
import pytest
from backend.executor.utils import merge_execution_input, parse_execution_output
from backend.util.mock import MockObject
def test_parse_execution_output():
# Test case for list extraction
# Test case for basic output
output = ("result", "value")
assert parse_execution_output(output, "result") == "value"
# Test case for list output
output = ("result", [10, 20, 30])
assert parse_execution_output(output, "result_$_1") == 20
assert parse_execution_output(output, "result_$_3") is None
# Test case for dictionary extraction
output = ("config", {"key1": "value1", "key2": "value2"})
assert parse_execution_output(output, "config_#_key1") == "value1"
assert parse_execution_output(output, "config_#_key3") is None
# Test case for dict output
output = ("result", {"key1": "value1", "key2": "value2"})
assert parse_execution_output(output, "result_#_key1") == "value1"
# Test case for object extraction
# Test case for object output
class Sample:
attr1 = "value1"
attr2 = "value2"
def __init__(self):
self.attr1 = "value1"
self.attr2 = "value2"
output = ("object", Sample())
assert parse_execution_output(output, "object_@_attr1") == "value1"
assert parse_execution_output(output, "object_@_attr3") is None
output = ("result", Sample())
assert parse_execution_output(output, "result_@_attr1") == "value1"
# Test case for direct match
output = ("direct", "match")
assert parse_execution_output(output, "direct") == "match"
assert parse_execution_output(output, "nomatch") is None
# Test case for nested list output
output = ("result", [[1, 2], [3, 4]])
assert parse_execution_output(output, "result_$_0_$_1") == 2
assert parse_execution_output(output, "result_$_1_$_0") == 3
# Test case for list containing dict
output = ("result", [{"key1": "value1"}, {"key2": "value2"}])
assert parse_execution_output(output, "result_$_0_#_key1") == "value1"
assert parse_execution_output(output, "result_$_1_#_key2") == "value2"
# Test case for dict containing list
output = ("result", {"key1": [1, 2], "key2": [3, 4]})
assert parse_execution_output(output, "result_#_key1_$_1") == 2
assert parse_execution_output(output, "result_#_key2_$_0") == 3
# Test case for complex nested structure
class NestedSample:
def __init__(self):
self.attr1 = [1, 2]
self.attr2 = {"key": "value"}
output = ("result", [NestedSample(), {"key": [1, 2]}])
assert parse_execution_output(output, "result_$_0_@_attr1_$_1") == 2
assert parse_execution_output(output, "result_$_0_@_attr2_#_key") == "value"
assert parse_execution_output(output, "result_$_1_#_key_$_0") == 1
# Test case for non-existent paths
output = ("result", [1, 2, 3])
assert parse_execution_output(output, "result_$_5") is None
assert parse_execution_output(output, "result_#_key") is None
assert parse_execution_output(output, "result_@_attr") is None
assert parse_execution_output(output, "wrong_name") is None
# Test cases for delimiter processing order
# Test case 1: List -> Dict -> List
output = ("result", [[{"key": [1, 2]}], [3, 4]])
assert parse_execution_output(output, "result_$_0_$_0_#_key_$_1") == 2
# Test case 2: Dict -> List -> Object
class NestedObj:
def __init__(self):
self.value = "nested"
output = ("result", {"key": [NestedObj(), 2]})
assert parse_execution_output(output, "result_#_key_$_0_@_value") == "nested"
# Test case 3: Object -> List -> Dict
class ParentObj:
def __init__(self):
self.items = [{"nested": "value"}]
output = ("result", ParentObj())
assert parse_execution_output(output, "result_@_items_$_0_#_nested") == "value"
# Test case 4: Complex nested structure with all types
class ComplexObj:
def __init__(self):
self.data = [{"items": [{"value": "deep"}]}]
output = ("result", {"key": [ComplexObj()]})
assert (
parse_execution_output(
output, "result_#_key_$_0_@_data_$_0_#_items_$_0_#_value"
)
== "deep"
)
# Test case 5: Invalid paths that should return None
output = ("result", [{"key": [1, 2]}])
assert parse_execution_output(output, "result_$_0_#_wrong_key") is None
assert parse_execution_output(output, "result_$_0_#_key_$_5") is None
assert parse_execution_output(output, "result_$_0_@_attr") is None
# Test case 6: Mixed delimiter types in wrong order
output = ("result", {"key": [1, 2]})
assert (
parse_execution_output(output, "result_#_key_$_1_@_attr") is None
) # Should fail at @_attr
assert (
parse_execution_output(output, "result_@_attr_$_0_#_key") is None
) # Should fail at @_attr
def test_merge_execution_input():
# Test case for merging list inputs
data = {"list_$_0": "a", "list_$_1": "b", "list_$_3": "d"}
merged_data = merge_execution_input(data)
assert merged_data["list"] == ["a", "b", "", "d"]
# Test case for basic list extraction
data = {
"list_$_0": "a",
"list_$_1": "b",
}
result = merge_execution_input(data)
assert "list" in result
assert result["list"] == ["a", "b"]
# Test case for merging dictionary inputs
data = {"dict_#_key1": "value1", "dict_#_key2": "value2"}
merged_data = merge_execution_input(data)
assert merged_data["dict"] == {"key1": "value1", "key2": "value2"}
# Test case for basic dict extraction
data = {
"dict_#_key1": "value1",
"dict_#_key2": "value2",
}
result = merge_execution_input(data)
assert "dict" in result
assert result["dict"] == {"key1": "value1", "key2": "value2"}
# Test case for merging object inputs
data = {"object_@_attr1": "value1", "object_@_attr2": "value2"}
merged_data = merge_execution_input(data)
assert hasattr(merged_data["object"], "attr1")
assert hasattr(merged_data["object"], "attr2")
assert merged_data["object"].attr1 == "value1"
assert merged_data["object"].attr2 == "value2"
# Test case for object extraction
class Sample:
def __init__(self):
self.attr1 = None
self.attr2 = None
# Test case for mixed inputs
data = {"list_$_0": "a", "dict_#_key1": "value1", "object_@_attr1": "value1"}
merged_data = merge_execution_input(data)
assert merged_data["list"] == ["a"]
assert merged_data["dict"] == {"key1": "value1"}
assert hasattr(merged_data["object"], "attr1")
assert merged_data["object"].attr1 == "value1"
data = {
"object_@_attr1": "value1",
"object_@_attr2": "value2",
}
result = merge_execution_input(data)
assert "object" in result
assert isinstance(result["object"], MockObject)
assert result["object"].attr1 == "value1"
assert result["object"].attr2 == "value2"
# Test case for nested list extraction
data = {
"nested_list_$_0_$_0": "a",
"nested_list_$_0_$_1": "b",
"nested_list_$_1_$_0": "c",
}
result = merge_execution_input(data)
assert "nested_list" in result
assert result["nested_list"] == [["a", "b"], ["c"]]
# Test case for list containing dict
data = {
"list_with_dict_$_0_#_key1": "value1",
"list_with_dict_$_0_#_key2": "value2",
"list_with_dict_$_1_#_key3": "value3",
}
result = merge_execution_input(data)
assert "list_with_dict" in result
assert result["list_with_dict"] == [
{"key1": "value1", "key2": "value2"},
{"key3": "value3"},
]
# Test case for dict containing list
data = {
"dict_with_list_#_key1_$_0": "value1",
"dict_with_list_#_key1_$_1": "value2",
"dict_with_list_#_key2_$_0": "value3",
}
result = merge_execution_input(data)
assert "dict_with_list" in result
assert result["dict_with_list"] == {
"key1": ["value1", "value2"],
"key2": ["value3"],
}
# Test case for complex nested structure
data = {
"complex_$_0_#_key1_$_0": "value1",
"complex_$_0_#_key1_$_1": "value2",
"complex_$_0_#_key2_@_attr1": "value3",
"complex_$_1_#_key3_$_0": "value4",
}
result = merge_execution_input(data)
assert "complex" in result
assert result["complex"][0]["key1"] == ["value1", "value2"]
assert isinstance(result["complex"][0]["key2"], MockObject)
assert result["complex"][0]["key2"].attr1 == "value3"
assert result["complex"][1]["key3"] == ["value4"]
# Test case for invalid list index
data = {"list_$_invalid": "value"}
with pytest.raises(ValueError, match="index must be an integer"):
merge_execution_input(data)
# Test cases for delimiter ordering
# Test case 1: List -> Dict -> List
data = {
"nested_$_0_#_key_$_0": "value1",
"nested_$_0_#_key_$_1": "value2",
}
result = merge_execution_input(data)
assert "nested" in result
assert result["nested"][0]["key"] == ["value1", "value2"]
# Test case 2: Dict -> List -> Object
data = {
"nested_#_key_$_0_@_attr": "value1",
"nested_#_key_$_1_@_attr": "value2",
}
result = merge_execution_input(data)
assert "nested" in result
assert isinstance(result["nested"]["key"][0], MockObject)
assert result["nested"]["key"][0].attr == "value1"
assert result["nested"]["key"][1].attr == "value2"
# Test case 3: Object -> List -> Dict
data = {
"nested_@_items_$_0_#_key": "value1",
"nested_@_items_$_1_#_key": "value2",
}
result = merge_execution_input(data)
assert "nested" in result
nested = result["nested"]
assert isinstance(nested, MockObject)
items = nested.items
assert isinstance(items, list)
assert items[0]["key"] == "value1"
assert items[1]["key"] == "value2"
# Test case 4: Complex nested structure with all types
data = {
"deep_#_key_$_0_@_data_$_0_#_items_$_0_#_value": "deep_value",
"deep_#_key_$_0_@_data_$_1_#_items_$_0_#_value": "another_value",
}
result = merge_execution_input(data)
assert "deep" in result
deep_key = result["deep"]["key"][0]
assert deep_key is not None
data0 = getattr(deep_key, "data", None)
assert isinstance(data0, list)
# Check items0
items0 = None
if len(data0) > 0 and isinstance(data0[0], dict) and "items" in data0[0]:
items0 = data0[0]["items"]
assert isinstance(items0, list)
items0 = cast(list, items0)
assert len(items0) > 0
assert isinstance(items0[0], dict)
assert items0[0]["value"] == "deep_value" # type: ignore
# Check items1
items1 = None
if len(data0) > 1 and isinstance(data0[1], dict) and "items" in data0[1]:
items1 = data0[1]["items"]
assert isinstance(items1, list)
items1 = cast(list, items1)
assert len(items1) > 0
assert isinstance(items1[0], dict)
assert items1[0]["value"] == "another_value" # type: ignore
# Test case 5: Mixed delimiter types in different orders
# the last one should replace the type
data = {
"mixed_$_0_#_key_@_attr": "value1", # List -> Dict -> Object
"mixed_#_key_$_0_@_attr": "value2", # Dict -> List -> Object
"mixed_@_attr_$_0_#_key": "value3", # Object -> List -> Dict
}
result = merge_execution_input(data)
assert "mixed" in result
assert result["mixed"].attr[0]["key"] == "value3"

View File

@@ -22,9 +22,14 @@
# debug
npm-debug.log*
pnpm-debug.log*
yarn-debug.log*
yarn-error.log*
# lock files (from yarn1 or npm)
yarn.lock
package-lock.json
# local env files
.env*.local

View File

@@ -1,4 +1,5 @@
node_modules
pnpm-lock.yaml
.next
build
public

View File

@@ -18,4 +18,5 @@ const config: StorybookConfig = {
},
staticDirs: ["../public"],
};
export default config;

View File

@@ -1,6 +1,8 @@
import React from "react";
import type { Preview } from "@storybook/react";
import { initialize, mswLoader } from "msw-storybook-addon";
import "../src/app/globals.css";
import "../src/components/styles/fonts.css";
// Initialize MSW
initialize();
@@ -18,6 +20,13 @@ const preview: Preview = {
},
},
loaders: [mswLoader],
decorators: [
(Story) => (
<>
<Story />
</>
),
],
};
export default preview;

View File

@@ -1,8 +1,9 @@
# Base stage for both dev and prod
FROM node:21-alpine AS base
WORKDIR /app
COPY autogpt_platform/frontend/package.json autogpt_platform/frontend/yarn.lock ./
RUN --mount=type=cache,target=/usr/local/share/.cache yarn install --frozen-lockfile
RUN corepack enable
COPY autogpt_platform/frontend/package.json autogpt_platform/frontend/pnpm-lock.yaml ./
RUN --mount=type=cache,target=/root/.local/share/pnpm pnpm install --frozen-lockfile
# Dev stage
FROM base AS dev
@@ -10,13 +11,13 @@ ENV NODE_ENV=development
ENV HOSTNAME=0.0.0.0
COPY autogpt_platform/frontend/ .
EXPOSE 3000
CMD ["yarn", "run", "dev", "--hostname", "0.0.0.0"]
CMD ["pnpm", "run", "dev", "--hostname", "0.0.0.0"]
# Build stage for prod
FROM base AS build
COPY autogpt_platform/frontend/ .
ENV SKIP_STORYBOOK_TESTS=true
RUN yarn build
RUN pnpm build
# Prod stage - based on NextJS reference Dockerfile https://github.com/vercel/next.js/blob/64271354533ed16da51be5dce85f0dbd15f17517/examples/with-docker/Dockerfile
FROM node:21-alpine AS prod

View File

@@ -1,46 +1,76 @@
This is the frontend for AutoGPT's next generation
## Getting Started
## 🧢 Getting Started
Run the following installation once.
This project uses [**pnpm**](https://pnpm.io/) as the package manager via **corepack**. [Corepack](https://github.com/nodejs/corepack) is a Node.js tool that automatically manages package managers without requiring global installations.
```bash
npm install
# or
yarn install
# or
pnpm install
# or
bun install
```
### Prerequisites
Next, run the development server:
Make sure you have Node.js 16.10+ installed. Corepack is included with Node.js by default.
```bash
npm run dev
# or
yarn dev
# or
pnpm dev
# or
bun dev
```
### ⚠️ Migrating from yarn
> This project was previously using yarn1, make sure to clean up the old files if you set it up previously with yarn:
>
> ```bash
> rm -f yarn.lock && rm -rf node_modules
> ```
>
> Then follow the setup steps below.
### Setup
1. **Enable corepack** (run this once on your system):
```bash
corepack enable
```
This enables corepack to automatically manage pnpm based on the `packageManager` field in `package.json`.
2. **Install dependencies**:
```bash
pnpm i
```
3. **Start the development server**:
```bash
pnpm dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
For subsequent runs, you do not have to `npm install` again. Simply do `npm run dev`.
### Subsequent Runs
If the project is updated via git, you will need to `npm install` after each update.
For subsequent development sessions, you only need to run:
```bash
pnpm dev
```
Every time a new Front-end dependency is added by you or others, you will need to run `pnpm i` to install the new dependencies.
### Available Scripts
- `pnpm dev` - Start development server
- `pnpm build` - Build for production
- `pnpm start` - Start production server
- `pnpm lint` - Run ESLint and Prettier checks
- `pnpm format` - Format code with Prettier
- `pnpm type-check` - Run TypeScript type checking
- `pnpm test` - Run Playwright tests
- `pnpm test-ui` - Run Playwright tests with UI
This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
## Deploy
## 🚚 Deploy
TODO
## Storybook
## 📙 Storybook
Storybook is a powerful development environment for UI components. It allows you to build UI components in isolation, making it easier to develop, test, and document your components independently from your main application.
@@ -57,7 +87,7 @@ Storybook is a powerful development environment for UI components. It allows you
Run the following command to start the Storybook development server:
```bash
npm run storybook
pnpm storybook
```
This will start Storybook on port 6006. Open [http://localhost:6006](http://localhost:6006) in your browser to view your component library.
@@ -66,23 +96,63 @@ Storybook is a powerful development environment for UI components. It allows you
To build a static version of Storybook for deployment, use:
```bash
npm run build-storybook
pnpm build-storybook
```
3. **Running Storybook Tests**:
Storybook tests can be run using:
```bash
npm run test-storybook
pnpm test-storybook
```
For CI environments, use:
```bash
npm run test-storybook:ci
pnpm test-storybook:ci
```
4. **Writing Stories**:
Create `.stories.tsx` files alongside your components to define different states and variations of your components.
By integrating Storybook into our development workflow, we can streamline UI development, improve component reusability, and maintain a consistent design system across the project.
## 🔭 Tech Stack
### Core Framework & Language
- [**Next.js**](https://nextjs.org/) - React framework with App Router
- [**React**](https://react.dev/) - UI library for building user interfaces
- [**TypeScript**](https://www.typescriptlang.org/) - Typed JavaScript for better developer experience
### Styling & UI Components
- [**Tailwind CSS**](https://tailwindcss.com/) - Utility-first CSS framework
- [**shadcn/ui**](https://ui.shadcn.com/) - Re-usable components built with Radix UI and Tailwind CSS
- [**Radix UI**](https://www.radix-ui.com/) - Headless UI components for accessibility
- [**Lucide React**](https://lucide.dev/guide/packages/lucide-react) - Beautiful & consistent icons
- [**Framer Motion**](https://motion.dev/) - Animation library for React
### Development & Testing
- [**Storybook**](https://storybook.js.org/) - Component development environment
- [**Playwright**](https://playwright.dev/) - End-to-end testing framework
- [**ESLint**](https://eslint.org/) - JavaScript/TypeScript linting
- [**Prettier**](https://prettier.io/) - Code formatting
### Backend & Services
- [**Supabase**](https://supabase.com/) - Backend-as-a-Service (database, auth, storage)
- [**Sentry**](https://sentry.io/) - Error monitoring and performance tracking
### Package Management
- [**pnpm**](https://pnpm.io/) - Fast, disk space efficient package manager
- [**Corepack**](https://github.com/nodejs/corepack) - Node.js package manager management
### Additional Libraries
- [**React Hook Form**](https://react-hook-form.com/) - Forms with easy validation
- [**Zod**](https://zod.dev/) - TypeScript-first schema validation
- [**React Table**](https://tanstack.com/table) - Headless table library
- [**React Flow**](https://reactflow.dev/) - Interactive node-based diagrams

View File

@@ -5,55 +5,58 @@
import { getEnvironmentStr } from "@/lib/utils";
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
if (process.env.NODE_ENV === "production") {
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
enabled: process.env.DISABLE_SENTRY !== "true",
environment: getEnvironmentStr(),
environment: getEnvironmentStr(),
// Add optional integrations for additional features
integrations: [
Sentry.replayIntegration(),
Sentry.httpClientIntegration(),
Sentry.replayCanvasIntegration(),
Sentry.reportingObserverIntegration(),
Sentry.browserProfilingIntegration(),
// Sentry.feedbackIntegration({
// // Additional SDK configuration goes in here, for example:
// colorScheme: "system",
// }),
],
// Add optional integrations for additional features
integrations: [
Sentry.replayIntegration(),
Sentry.httpClientIntegration(),
Sentry.replayCanvasIntegration(),
Sentry.reportingObserverIntegration(),
Sentry.browserProfilingIntegration(),
// Sentry.feedbackIntegration({
// // Additional SDK configuration goes in here, for example:
// colorScheme: "system",
// }),
],
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
// Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
tracePropagationTargets: [
"localhost",
"localhost:8006",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
/^https:\/\/.*\.agpt\.co\/api/,
],
// Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
tracePropagationTargets: [
"localhost",
"localhost:8006",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
/^https:\/\/.*\.agpt\.co\/api/,
],
// Define how likely Replay events are sampled.
// This sets the sample rate to be 10%. You may want this to be 100% while
// in development and sample at a lower rate in production
replaysSessionSampleRate: 0.1,
// Define how likely Replay events are sampled.
// This sets the sample rate to be 10%. You may want this to be 100% while
// in development and sample at a lower rate in production
replaysSessionSampleRate: 0.1,
// Define how likely Replay events are sampled when an error occurs.
replaysOnErrorSampleRate: 1.0,
// Define how likely Replay events are sampled when an error occurs.
replaysOnErrorSampleRate: 1.0,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
// Set profilesSampleRate to 1.0 to profile every transaction.
// Since profilesSampleRate is relative to tracesSampleRate,
// the final profiling rate can be computed as tracesSampleRate * profilesSampleRate
// For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
// result in 25% of transactions being profiled (0.5*0.5=0.25)
profilesSampleRate: 1.0,
_experiments: {
// Enable logs to be sent to Sentry.
enableLogs: true,
},
});
// Set profilesSampleRate to 1.0 to profile every transaction.
// Since profilesSampleRate is relative to tracesSampleRate,
// the final profiling rate can be computed as tracesSampleRate * profilesSampleRate
// For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
// result in 25% of transactions being profiled (0.5*0.5=0.25)
profilesSampleRate: 1.0,
_experiments: {
// Enable logs to be sent to Sentry.
enableLogs: true,
},
});
}
export const onRouterTransitionStart = Sentry.captureRouterTransitionStart;

View File

@@ -16,63 +16,64 @@ const nextConfig = {
],
},
output: "standalone",
// TODO: Re-enable TypeScript checks once current issues are resolved
typescript: {
ignoreBuildErrors: true,
},
transpilePackages: ["geist"],
};
export default withSentryConfig(nextConfig, {
// For all available options, see:
// https://github.com/getsentry/sentry-webpack-plugin#options
const isDevelopmentBuild = process.env.NODE_ENV !== "production";
org: "significant-gravitas",
project: "builder",
export default isDevelopmentBuild
? nextConfig
: withSentryConfig(nextConfig, {
// For all available options, see:
// https://github.com/getsentry/sentry-webpack-plugin#options
// Only print logs for uploading source maps in CI
silent: !process.env.CI,
org: "significant-gravitas",
project: "builder",
// For all available options, see:
// https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
// Only print logs for uploading source maps in CI
silent: !process.env.CI,
// Upload a larger set of source maps for prettier stack traces (increases build time)
widenClientFileUpload: true,
// For all available options, see:
// https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
// Automatically annotate React components to show their full name in breadcrumbs and session replay
reactComponentAnnotation: {
enabled: true,
},
// Upload a larger set of source maps for prettier stack traces (increases build time)
widenClientFileUpload: true,
// Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers.
// This can increase your server load as well as your hosting bill.
// Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
// side errors will fail.
tunnelRoute: "/store",
// Hides source maps from generated client bundles
hideSourceMaps: true,
// Automatically tree-shake Sentry logger statements to reduce bundle size
disableLogger: true,
// Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.)
// See the following for more information:
// https://docs.sentry.io/product/crons/
// https://vercel.com/docs/cron-jobs
automaticVercelMonitors: true,
async headers() {
return [
{
source: "/:path*",
headers: [
{
key: "Document-Policy",
value: "js-profiling",
},
],
// Automatically annotate React components to show their full name in breadcrumbs and session replay
reactComponentAnnotation: {
enabled: true,
},
];
},
});
// Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers.
// This can increase your server load as well as your hosting bill.
// Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
// side errors will fail.
tunnelRoute: "/store",
// No need to hide source maps from generated client bundles
// since the source is public anyway :)
hideSourceMaps: false,
// Automatically tree-shake Sentry logger statements to reduce bundle size
disableLogger: true,
// Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.)
// See the following for more information:
// https://docs.sentry.io/product/crons/
// https://vercel.com/docs/cron-jobs
automaticVercelMonitors: true,
async headers() {
return [
{
source: "/:path*",
headers: [
{
key: "Document-Policy",
value: "js-profiling",
},
],
},
];
},
});

View File

@@ -3,21 +3,22 @@
"version": "0.3.4",
"private": true,
"scripts": {
"dev": "next dev",
"dev:nosentry": "NODE_ENV=development && DISABLE_SENTRY=true && next dev",
"dev:test": "NODE_ENV=test && next dev",
"dev": "next dev --turbo",
"dev:test": "NODE_ENV=test && next dev --turbo",
"build": "SKIP_STORYBOOK_TESTS=true next build",
"start": "next start",
"start:standalone": "cd .next/standalone && node server.js",
"lint": "next lint && prettier --check .",
"format": "prettier --write .",
"type-check": "tsc --noEmit",
"test": "playwright test",
"test-ui": "playwright test --ui",
"test": "next build --turbo && playwright test",
"test-ui": "next build --turbo && playwright test --ui",
"test:no-build": "playwright test",
"gentests": "playwright codegen http://localhost:3000",
"storybook": "storybook dev -p 6006",
"build-storybook": "storybook build",
"test-storybook": "test-storybook",
"test-storybook:ci": "concurrently -k -s first -n \"SB,TEST\" -c \"magenta,blue\" \"npm run build-storybook -- --quiet && npx http-server storybook-static --port 6006 --silent\" \"wait-on tcp:6006 && npm run test-storybook\""
"test-storybook:ci": "concurrently -k -s first -n \"SB,TEST\" -c \"magenta,blue\" \"pnpm run build-storybook -- --quiet && npx http-server storybook-static --port 6006 --silent\" \"wait-on tcp:6006 && pnpm run test-storybook\""
},
"browserslist": [
"defaults"
@@ -47,7 +48,7 @@
"@radix-ui/react-tooltip": "^1.2.6",
"@sentry/nextjs": "^9",
"@supabase/ssr": "^0.6.1",
"@supabase/supabase-js": "^2.49.4",
"@supabase/supabase-js": "^2.49.10",
"@tanstack/react-table": "^8.21.3",
"@types/jaro-winkler": "^0.2.4",
"@xyflow/react": "12.6.4",
@@ -61,14 +62,14 @@
"dotenv": "^16.5.0",
"elliptic": "6.6.1",
"embla-carousel-react": "^8.6.0",
"framer-motion": "^12.12.1",
"framer-motion": "^12.16.0",
"geist": "^1.4.2",
"jaro-winkler": "^0.2.8",
"launchdarkly-react-client-sdk": "^3.7.0",
"lodash.debounce": "^4.0.8",
"lucide-react": "^0.510.0",
"lodash": "^4.17.21",
"lucide-react": "^0.513.0",
"moment": "^2.30.1",
"next": "^14.2.26",
"next": "^15.3.2",
"next-themes": "^0.4.6",
"party-js": "^2.2.0",
"react": "^18",
@@ -82,12 +83,13 @@
"react-shepherd": "^6.1.8",
"react-timeago": "^8.2.0",
"recharts": "^2.15.3",
"shepherd.js": "^14.5.0",
"tailwind-merge": "^2.6.0",
"tailwind-scrollbar": "^4.0.2",
"tailwind-scrollbar-hide": "^2.0.0",
"tailwindcss-animate": "^1.0.7",
"uuid": "^11.1.0",
"zod": "^3.24.4"
"zod": "^3.25.51"
},
"devDependencies": {
"@chromatic-com/storybook": "^3.2.4",
@@ -101,9 +103,9 @@
"@storybook/nextjs": "^8.5.3",
"@storybook/react": "^8.3.5",
"@storybook/test": "^8.3.5",
"@storybook/test-runner": "^0.21.0",
"@storybook/test-runner": "^0.22.0",
"@types/canvas-confetti": "^1.9.0",
"@types/lodash": "^4.17.13",
"@types/lodash": "^4.17.17",
"@types/negotiator": "^0.6.3",
"@types/node": "^22.13.0",
"@types/react": "^18",
@@ -113,9 +115,9 @@
"chromatic": "^11.25.2",
"concurrently": "^9.1.2",
"eslint": "^8",
"eslint-config-next": "15.1.6",
"eslint-plugin-storybook": "^0.11.2",
"msw": "^2.7.0",
"eslint-config-next": "15.3.3",
"eslint-plugin-storybook": "^0.12.0",
"msw": "^2.9.0",
"msw-storybook-addon": "^2.0.3",
"postcss": "^8",
"prettier": "^3.3.3",
@@ -124,10 +126,10 @@
"tailwindcss": "^3.4.17",
"typescript": "^5"
},
"packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e",
"msw": {
"workerDirectory": [
"public"
]
}
},
"packageManager": "pnpm@10.11.1+sha256.211e9990148495c9fc30b7e58396f7eeda83d9243eb75407ea4f8650fb161f7c"
}

View File

@@ -22,7 +22,7 @@ export default defineConfig({
/* Opt out of parallel tests on CI. */
workers: process.env.CI ? 1 : undefined,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: "html",
reporter: [["html"], ["line"]],
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Base URL to use in actions like `await page.goto('/')`. */
@@ -74,11 +74,14 @@ export default defineConfig({
// },
],
/* Run your local dev server before starting the tests */
/* Run your local server before starting the tests */
webServer: {
command: "npm run build && npm run start",
command: "pnpm start",
url: "http://localhost:3000/",
reuseExistingServer: !process.env.CI,
timeout: 120 * 1000,
timeout: 10 * 1000,
env: {
NODE_ENV: "test",
},
},
});

16541
autogpt_platform/frontend/pnpm-lock.yaml generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -9,7 +9,6 @@ import { getEnvironmentStr } from "./src/lib/utils";
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
enabled: process.env.NODE_ENV !== "development",
environment: getEnvironmentStr(),
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.

View File

@@ -9,7 +9,6 @@ import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
enabled: process.env.NODE_ENV !== "development",
environment: getEnvironmentStr(),
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.

View File

@@ -2,6 +2,9 @@ import BackendAPI from "@/lib/autogpt-server-api";
import { redirect } from "next/navigation";
import { finishOnboarding } from "./6-congrats/actions";
// Force dynamic rendering to avoid static generation issues with cookies
export const dynamic = "force-dynamic";
export default async function OnboardingPage() {
const api = new BackendAPI();

View File

@@ -3,14 +3,16 @@ import { Suspense } from "react";
import type { SubmissionStatus } from "@/lib/autogpt-server-api/types";
import { AdminAgentsDataTable } from "@/components/admin/marketplace/admin-agents-data-table";
type MarketplaceAdminPageSearchParams = {
page?: string;
status?: string;
search?: string;
};
async function AdminMarketplaceDashboard({
searchParams,
}: {
searchParams: {
page?: string;
status?: string;
search?: string;
};
searchParams: MarketplaceAdminPageSearchParams;
}) {
const page = searchParams.page ? Number.parseInt(searchParams.page) : 1;
const status = searchParams.status as SubmissionStatus | undefined;
@@ -47,16 +49,12 @@ async function AdminMarketplaceDashboard({
export default async function AdminMarketplacePage({
searchParams,
}: {
searchParams: {
page?: string;
status?: string;
search?: string;
};
searchParams: Promise<MarketplaceAdminPageSearchParams>;
}) {
"use server";
const withAdminAccess = await withRoleAccess(["admin"]);
const ProtectedAdminMarketplace = await withAdminAccess(
AdminMarketplaceDashboard,
);
return <ProtectedAdminMarketplace searchParams={searchParams} />;
return <ProtectedAdminMarketplace searchParams={await searchParams} />;
}

View File

@@ -3,14 +3,16 @@ import type { CreditTransactionType } from "@/lib/autogpt-server-api";
import { withRoleAccess } from "@/lib/withRoleAccess";
import { Suspense } from "react";
type SpendingDashboardPageSearchParams = {
page?: string;
status?: string;
search?: string;
};
function SpendingDashboard({
searchParams,
}: {
searchParams: {
page?: string;
status?: string;
search?: string;
};
searchParams: SpendingDashboardPageSearchParams;
}) {
const page = searchParams.page ? Number.parseInt(searchParams.page) : 1;
const search = searchParams.search;
@@ -45,14 +47,10 @@ function SpendingDashboard({
export default async function SpendingDashboardPage({
searchParams,
}: {
searchParams: {
page?: string;
status?: string;
search?: string;
};
searchParams: Promise<SpendingDashboardPageSearchParams>;
}) {
"use server";
const withAdminAccess = await withRoleAccess(["admin"]);
const ProtectedSpendingDashboard = await withAdminAccess(SpendingDashboard);
return <ProtectedSpendingDashboard searchParams={searchParams} />;
return <ProtectedSpendingDashboard searchParams={await searchParams} />;
}

View File

@@ -8,30 +8,6 @@ import BackendAPI from "@/lib/autogpt-server-api";
import { loginFormSchema, LoginProvider } from "@/types/auth";
import { verifyTurnstileToken } from "@/lib/turnstile";
export async function logout() {
return await Sentry.withServerActionInstrumentation(
"logout",
{},
async () => {
const supabase = getServerSupabase();
if (!supabase) {
redirect("/error");
}
const { error } = await supabase.auth.signOut();
if (error) {
console.error("Error logging out", error);
return error.message;
}
revalidatePath("/", "layout");
redirect("/login");
},
);
}
async function shouldShowOnboarding() {
const api = new BackendAPI();
return (
@@ -59,23 +35,21 @@ export async function login(
}
// We are sure that the values are of the correct type because zod validates the form
const { data, error } = await supabase.auth.signInWithPassword(values);
const { error } = await supabase.auth.signInWithPassword(values);
if (error) {
console.error("Error logging in", error);
console.error("Error logging in:", error);
return error.message;
}
await api.createUser();
// Don't onboard if disabled or already onboarded
if (await shouldShowOnboarding()) {
revalidatePath("/onboarding", "layout");
redirect("/onboarding");
}
if (data.session) {
await supabase.auth.setSession(data.session);
}
revalidatePath("/", "layout");
redirect("/");
});

View File

@@ -15,7 +15,7 @@ import { zodResolver } from "@hookform/resolvers/zod";
import { useCallback, useState } from "react";
import { useRouter } from "next/navigation";
import Link from "next/link";
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import LoadingBox from "@/components/ui/loading";
import {
AuthCard,
@@ -80,6 +80,7 @@ export default function LoginPage() {
}
const error = await login(data, turnstile.token as string);
await supabase?.auth.refreshSession();
setIsLoading(false);
if (error) {
setFeedback(error);
@@ -89,7 +90,7 @@ export default function LoginPage() {
}
setFeedback(null);
},
[form, turnstile],
[form, turnstile, supabase],
);
if (user) {

View File

@@ -0,0 +1,56 @@
"use server";
import BackendAPI, {
CreatorsResponse,
StoreAgentsResponse,
} from "@/lib/autogpt-server-api";
// Fallback value returned when fetching store agents fails: an empty agent
// list with a zeroed-out pagination envelope, so consumers can render an
// empty state instead of crashing.
const EMPTY_AGENTS_RESPONSE: StoreAgentsResponse = {
  agents: [],
  pagination: {
    total_items: 0,
    total_pages: 0,
    current_page: 0,
    page_size: 0,
  },
};

// Fallback value returned when fetching store creators fails; mirrors
// EMPTY_AGENTS_RESPONSE but for the creators listing.
const EMPTY_CREATORS_RESPONSE: CreatorsResponse = {
  creators: [],
  pagination: {
    total_items: 0,
    total_pages: 0,
    current_page: 0,
    page_size: 0,
  },
};
/**
 * Server action that loads the three marketplace landing-page data sets
 * (featured agents, top agents by runs, featured creators) in parallel.
 *
 * Each request falls back to an empty response on failure (logging the
 * error), so a single backend error cannot break the whole page.
 */
export async function getMarketplaceData(): Promise<{
  featuredAgents: StoreAgentsResponse;
  topAgents: StoreAgentsResponse;
  featuredCreators: CreatorsResponse;
}> {
  const api = new BackendAPI();

  // Resolve a request, logging and substituting the given fallback on error.
  // The promise passed in is already in flight, so awaiting here does not
  // serialize the three fetches.
  async function fetchOrFallback<T>(
    request: Promise<T>,
    label: string,
    fallback: T,
  ): Promise<T> {
    try {
      return await request;
    } catch (error) {
      console.error(`Error fetching ${label}:`, error);
      return fallback;
    }
  }

  // Kick off all three requests before awaiting any of them.
  const featuredAgentsRequest = api.getStoreAgents({ featured: true });
  const topAgentsRequest = api.getStoreAgents({ sorted_by: "runs" });
  const featuredCreatorsRequest = api.getStoreCreators({
    featured: true,
    sorted_by: "num_agents",
  });

  const [featuredAgents, topAgents, featuredCreators] = await Promise.all([
    fetchOrFallback(
      featuredAgentsRequest,
      "featured marketplace agents",
      EMPTY_AGENTS_RESPONSE,
    ),
    fetchOrFallback(
      topAgentsRequest,
      "top marketplace agents",
      EMPTY_AGENTS_RESPONSE,
    ),
    fetchOrFallback(
      featuredCreatorsRequest,
      "featured marketplace creators",
      EMPTY_CREATORS_RESPONSE,
    ),
  ]);

  return {
    featuredAgents,
    topAgents,
    featuredCreators,
  };
}

View File

@@ -8,12 +8,18 @@ import { Separator } from "@/components/ui/separator";
import { Metadata } from "next";
import getServerUser from "@/lib/supabase/getServerUser";
// Force dynamic rendering to avoid static generation issues with cookies
export const dynamic = "force-dynamic";
type MarketplaceAgentPageParams = { creator: string; slug: string };
export async function generateMetadata({
params,
params: _params,
}: {
params: { creator: string; slug: string };
params: Promise<MarketplaceAgentPageParams>;
}): Promise<Metadata> {
const api = new BackendAPI();
const params = await _params;
const agent = await api.getStoreAgent(params.creator, params.slug);
return {
@@ -31,11 +37,12 @@ export async function generateMetadata({
// }));
// }
export default async function Page({
params,
export default async function MarketplaceAgentPage({
params: _params,
}: {
params: { creator: string; slug: string };
params: Promise<MarketplaceAgentPageParams>;
}) {
const params = await _params;
const creator_lower = params.creator.toLowerCase();
const { user } = await getServerUser();
const api = new BackendAPI();

View File

@@ -6,12 +6,18 @@ import { CreatorInfoCard } from "@/components/agptui/CreatorInfoCard";
import { CreatorLinks } from "@/components/agptui/CreatorLinks";
import { Separator } from "@/components/ui/separator";
// Force dynamic rendering to avoid static generation issues with cookies
export const dynamic = "force-dynamic";
type MarketplaceCreatorPageParams = { creator: string };
export async function generateMetadata({
params,
params: _params,
}: {
params: { creator: string };
params: Promise<MarketplaceCreatorPageParams>;
}): Promise<Metadata> {
const api = new BackendAPI();
const params = await _params;
const creator = await api.getStoreCreator(params.creator.toLowerCase());
return {
@@ -29,11 +35,12 @@ export async function generateMetadata({
// }
export default async function Page({
params,
params: _params,
}: {
params: { creator: string };
params: Promise<MarketplaceCreatorPageParams>;
}) {
const api = new BackendAPI();
const params = await _params;
try {
const creator = await api.getStoreCreator(params.creator);

View File

@@ -1,4 +1,4 @@
import * as React from "react";
import React from "react";
import { HeroSection } from "@/components/agptui/composite/HeroSection";
import { FeaturedSection } from "@/components/agptui/composite/FeaturedSection";
import {
@@ -12,93 +12,11 @@ import {
} from "@/components/agptui/composite/FeaturedCreators";
import { Separator } from "@/components/ui/separator";
import { Metadata } from "next";
import {
StoreAgentsResponse,
CreatorsResponse,
} from "@/lib/autogpt-server-api/types";
import BackendAPI from "@/lib/autogpt-server-api";
async function getStoreData() {
try {
const api = new BackendAPI();
import { getMarketplaceData } from "./actions";
// Add error handling and default values
let featuredAgents: StoreAgentsResponse = {
agents: [],
pagination: {
total_items: 0,
total_pages: 0,
current_page: 0,
page_size: 0,
},
};
let topAgents: StoreAgentsResponse = {
agents: [],
pagination: {
total_items: 0,
total_pages: 0,
current_page: 0,
page_size: 0,
},
};
let featuredCreators: CreatorsResponse = {
creators: [],
pagination: {
total_items: 0,
total_pages: 0,
current_page: 0,
page_size: 0,
},
};
try {
[featuredAgents, topAgents, featuredCreators] = await Promise.all([
api.getStoreAgents({ featured: true }),
api.getStoreAgents({ sorted_by: "runs" }),
api.getStoreCreators({ featured: true, sorted_by: "num_agents" }),
]);
} catch (error) {
console.error("Error fetching store data:", error);
}
return {
featuredAgents,
topAgents,
featuredCreators,
};
} catch (error) {
console.error("Error in getStoreData:", error);
return {
featuredAgents: {
agents: [],
pagination: {
total_items: 0,
total_pages: 0,
current_page: 0,
page_size: 0,
},
},
topAgents: {
agents: [],
pagination: {
total_items: 0,
total_pages: 0,
current_page: 0,
page_size: 0,
},
},
featuredCreators: {
creators: [],
pagination: {
total_items: 0,
total_pages: 0,
current_page: 0,
page_size: 0,
},
},
};
}
}
// Force dynamic rendering to avoid static generation issues with cookies
export const dynamic = "force-dynamic";
// FIX: Correct metadata
export const metadata: Metadata = {
@@ -144,9 +62,9 @@ export const metadata: Metadata = {
},
};
export default async function Page({}: {}) {
// Get data server-side
const { featuredAgents, topAgents, featuredCreators } = await getStoreData();
export default async function MarketplacePage(): Promise<React.ReactElement> {
const { featuredAgents, topAgents, featuredCreators } =
await getMarketplaceData();
return (
<div className="mx-auto w-screen max-w-[1360px]">

View File

@@ -1,6 +1,6 @@
"use client";
import { useState, useEffect } from "react";
import { use, useCallback, useEffect, useState } from "react";
import { AgentsSection } from "@/components/agptui/composite/AgentsSection";
import { SearchBar } from "@/components/agptui/SearchBar";
import { FeaturedCreators } from "@/components/agptui/composite/FeaturedCreators";
@@ -9,15 +9,17 @@ import { SearchFilterChips } from "@/components/agptui/SearchFilterChips";
import { SortDropdown } from "@/components/agptui/SortDropdown";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
export default function Page({
type MarketplaceSearchPageSearchParams = { searchTerm?: string; sort?: string };
export default function MarketplaceSearchPage({
searchParams,
}: {
searchParams: { searchTerm?: string; sort?: string };
searchParams: Promise<MarketplaceSearchPageSearchParams>;
}) {
return (
<SearchResults
searchTerm={searchParams.searchTerm || ""}
sort={searchParams.sort || "trending"}
searchTerm={use(searchParams).searchTerm || ""}
sort={use(searchParams).sort || "trending"}
/>
);
}
@@ -28,7 +30,7 @@ function SearchResults({
}: {
searchTerm: string;
sort: string;
}) {
}): React.ReactElement {
const [showAgents, setShowAgents] = useState(true);
const [showCreators, setShowCreators] = useState(true);
const [agents, setAgents] = useState<any[]>([]);
@@ -80,40 +82,43 @@ function SearchResults({
}
};
const handleSortChange = (sortValue: string) => {
let sortBy = "recent";
if (sortValue === "runs") {
sortBy = "runs";
} else if (sortValue === "rating") {
sortBy = "rating";
}
const sortedAgents = [...agents].sort((a, b) => {
if (sortBy === "runs") {
return b.runs - a.runs;
} else if (sortBy === "rating") {
return b.rating - a.rating;
} else {
return (
new Date(b.updated_at).getTime() - new Date(a.updated_at).getTime()
);
const handleSortChange = useCallback(
(sortValue: string) => {
let sortBy = "recent";
if (sortValue === "runs") {
sortBy = "runs";
} else if (sortValue === "rating") {
sortBy = "rating";
}
});
const sortedCreators = [...creators].sort((a, b) => {
if (sortBy === "runs") {
return b.agent_runs - a.agent_runs;
} else if (sortBy === "rating") {
return b.agent_rating - a.agent_rating;
} else {
// Creators don't have updated_at, sort by number of agents as fallback
return b.num_agents - a.num_agents;
}
});
const sortedAgents = [...agents].sort((a, b) => {
if (sortBy === "runs") {
return b.runs - a.runs;
} else if (sortBy === "rating") {
return b.rating - a.rating;
} else {
return (
new Date(b.updated_at).getTime() - new Date(a.updated_at).getTime()
);
}
});
setAgents(sortedAgents);
setCreators(sortedCreators);
};
const sortedCreators = [...creators].sort((a, b) => {
if (sortBy === "runs") {
return b.agent_runs - a.agent_runs;
} else if (sortBy === "rating") {
return b.agent_rating - a.agent_rating;
} else {
// Creators don't have updated_at, sort by number of agents as fallback
return b.num_agents - a.num_agents;
}
});
setAgents(sortedAgents);
setCreators(sortedCreators);
},
[agents, creators],
);
return (
<div className="w-full">

View File

@@ -11,7 +11,7 @@ import {
StoreSubmissionsResponse,
StoreSubmissionRequest,
} from "@/lib/autogpt-server-api/types";
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
export default function Page({}: {}) {

View File

@@ -1,7 +1,7 @@
"use client";
import { Button } from "@/components/ui/button";
import { useRouter } from "next/navigation";
import { useCallback, useContext, useMemo, useState } from "react";
import { useCallback, useContext, useEffect, useMemo, useState } from "react";
import { useToast } from "@/components/ui/use-toast";
import { IconKey, IconUser } from "@/components/ui/icons";
import { Trash2Icon } from "lucide-react";
@@ -26,10 +26,10 @@ import {
AlertDialogHeader,
AlertDialogTitle,
} from "@/components/ui/alert-dialog";
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import LoadingBox from "@/components/ui/loading";
export default function PrivatePage() {
export default function UserIntegrationsPage() {
const { supabase, user, isUserLoading } = useSupabase();
const router = useRouter();
const providers = useContext(CredentialsProvidersContext);
@@ -122,15 +122,15 @@ export default function PrivatePage() {
[],
);
useEffect(() => {
if (isUserLoading) return;
if (!user || !supabase) router.push("/login");
}, [isUserLoading, user, supabase, router]);
if (isUserLoading) {
return <LoadingBox className="h-[80vh]" />;
}
if (!user || !supabase) {
router.push("/login");
return null;
}
const allCredentials = providers
? Object.values(providers).flatMap((provider) =>
provider.savedCredentials

View File

@@ -1,40 +1,28 @@
import * as React from "react";
import React from "react";
import { Metadata } from "next/types";
import { ProfileInfoForm } from "@/components/agptui/ProfileInfoForm";
import { redirect } from "next/navigation";
import BackendAPI from "@/lib/autogpt-server-api";
import { CreatorDetails } from "@/lib/autogpt-server-api/types";
import { ProfileInfoForm } from "@/components/agptui/ProfileInfoForm";
async function getProfileData(api: BackendAPI) {
try {
const profile = await api.getStoreProfile();
return {
profile,
};
} catch (error) {
console.error("Error fetching profile:", error);
return {
profile: null,
};
}
}
// Force dynamic rendering to avoid static generation issues with cookies
export const dynamic = "force-dynamic";
export const metadata: Metadata = { title: "Profile - AutoGPT Platform" };
export default async function Page({}: {}) {
export default async function UserProfilePage(): Promise<React.ReactElement> {
const api = new BackendAPI();
const { profile } = await getProfileData(api);
const profile = await api.getStoreProfile().catch((error) => {
console.error("Error fetching profile:", error);
return null;
});
if (!profile) {
return (
<div className="flex flex-col items-center justify-center p-4">
<p>Please log in to view your profile</p>
</div>
);
redirect("/login");
}
return (
<div className="flex flex-col items-center justify-center px-4">
<ProfileInfoForm profile={profile as CreatorDetails} />
<ProfileInfoForm profile={profile} />
</div>
);
}

View File

@@ -11,7 +11,7 @@ export async function sendResetEmail(email: string, turnstileToken: string) {
{},
async () => {
const supabase = getServerSupabase();
const headersList = headers();
const headersList = await headers();
const host = headersList.get("host");
const protocol =
process.env.NODE_ENV === "development" ? "http" : "https";
@@ -38,8 +38,6 @@ export async function sendResetEmail(email: string, turnstileToken: string) {
console.error("Error sending reset email", error);
return error.message;
}
redirect("/reset_password");
},
);
}

View File

@@ -17,7 +17,7 @@ import {
FormMessage,
} from "@/components/ui/form";
import { Input } from "@/components/ui/input";
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import { sendEmailFormSchema, changePasswordFormSchema } from "@/types/auth";
import { zodResolver } from "@hookform/resolvers/zod";
import { useCallback, useState } from "react";

View File

@@ -17,7 +17,7 @@ import { useCallback, useState } from "react";
import { useRouter } from "next/navigation";
import Link from "next/link";
import { Checkbox } from "@/components/ui/checkbox";
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import LoadingBox from "@/components/ui/loading";
import {
AuthCard,

View File

@@ -1,8 +1,6 @@
import React, { Suspense } from "react";
import type { Metadata } from "next";
import { Poppins } from "next/font/google";
import { GeistSans } from "geist/font/sans";
import { GeistMono } from "geist/font/mono";
import { fonts } from "@/components/styles/fonts";
import "./globals.css";
@@ -11,12 +9,6 @@ import { Providers } from "@/app/providers";
import TallyPopupSimple from "@/components/TallyPopup";
import { GoogleAnalytics } from "@/components/analytics/google-analytics";
const poppins = Poppins({
subsets: ["latin"],
weight: ["400", "500", "600", "700"],
variable: "--font-poppins",
});
export const metadata: Metadata = {
title: "AutoGPT Platform",
description: "Your one stop shop to creating AI Agents",
@@ -30,7 +22,7 @@ export default async function RootLayout({
return (
<html
lang="en"
className={`${poppins.variable} ${GeistSans.variable} ${GeistMono.variable}`}
className={`${fonts.poppins.variable} ${fonts.sans.variable} ${fonts.mono.variable}`}
>
<head>
<GoogleAnalytics

View File

@@ -1,55 +0,0 @@
"use client";
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { Button } from "./ui/button";
import { useRouter } from "next/navigation";
import useSupabase from "@/hooks/useSupabase";
const ProfileDropdown = () => {
const { supabase, user, isUserLoading } = useSupabase();
const router = useRouter();
if (isUserLoading) {
return null;
}
return (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" className="h-8 w-8 rounded-full">
<Avatar>
<AvatarImage
src={user?.user_metadata["avatar_url"]}
alt="User Avatar"
/>
<AvatarFallback>CN</AvatarFallback>
</Avatar>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
<DropdownMenuItem onClick={() => router.push("/profile")}>
Profile
</DropdownMenuItem>
{user!.role === "admin" && (
<DropdownMenuItem onClick={() => router.push("/admin/dashboard")}>
Admin Dashboard
</DropdownMenuItem>
)}
<DropdownMenuItem
onClick={() =>
supabase?.auth.signOut().then(() => router.replace("/login"))
}
>
Log out
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
);
};
export default ProfileDropdown;

View File

@@ -1,5 +1,5 @@
// components/RoleBasedAccess.tsx
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import React from "react";
interface RoleBasedAccessProps {

View File

@@ -51,9 +51,6 @@ export const Empty: Story = {
description: "",
avatar_url: "",
links: [],
top_categories: [],
agent_rating: 0,
agent_runs: 0,
},
},
};
@@ -71,9 +68,6 @@ export const Filled: Story = {
"twitter.com/oliviagrace",
"github.com/ograce",
],
top_categories: ["Entertainment", "Blog", "Content creation"],
agent_rating: 4.5,
agent_runs: 100,
},
},
};

View File

@@ -7,14 +7,14 @@ import Image from "next/image";
import { Button } from "./Button";
import { IconPersonFill } from "@/components/ui/icons";
import { CreatorDetails, ProfileDetails } from "@/lib/autogpt-server-api/types";
import { ProfileDetails } from "@/lib/autogpt-server-api/types";
import { Separator } from "@/components/ui/separator";
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
export const ProfileInfoForm = ({ profile }: { profile: CreatorDetails }) => {
export const ProfileInfoForm = ({ profile }: { profile: ProfileDetails }) => {
const [isSubmitting, setIsSubmitting] = useState(false);
const [profileData, setProfileData] = useState(profile);
const [profileData, setProfileData] = useState<ProfileDetails>(profile);
const { supabase } = useSupabase();
const api = useBackendAPI();
@@ -31,10 +31,8 @@ export const ProfileInfoForm = ({ profile }: { profile: CreatorDetails }) => {
};
if (!isSubmitting) {
const returnedProfile = await api.updateStoreProfile(
updatedProfile as ProfileDetails,
);
setProfileData(returnedProfile as CreatorDetails);
const returnedProfile = await api.updateStoreProfile(updatedProfile);
setProfileData(returnedProfile);
}
} catch (error) {
console.error("Error updating profile:", error);
@@ -88,10 +86,8 @@ export const ProfileInfoForm = ({ profile }: { profile: CreatorDetails }) => {
avatar_url: mediaUrl,
};
const returnedProfile = await api.updateStoreProfile(
updatedProfile as ProfileDetails,
);
setProfileData(returnedProfile as CreatorDetails);
const returnedProfile = await api.updateStoreProfile(updatedProfile);
setProfileData(returnedProfile);
} catch (error) {
console.error("Error uploading image:", error);
}

View File

@@ -37,12 +37,12 @@ interface ProfilePopoutMenuProps {
}[];
}
export const ProfilePopoutMenu: React.FC<ProfilePopoutMenuProps> = ({
export function ProfilePopoutMenu({
userName,
userEmail,
avatarSrc,
menuItemGroups,
}) => {
}: ProfilePopoutMenuProps) {
const popupId = React.useId();
const getIcon = (icon: IconType) => {
@@ -91,28 +91,28 @@ export const ProfilePopoutMenu: React.FC<ProfilePopoutMenuProps> = ({
<PopoverContent
id={popupId}
className="flex h-[380px] w-[300px] flex-col items-start justify-start gap-4 rounded-[26px] bg-zinc-400/70 p-6 shadow backdrop-blur-2xl dark:bg-zinc-800/70"
className="flex flex-col items-start justify-start gap-4 rounded-[26px] bg-zinc-400/70 p-4 shadow backdrop-blur-2xl dark:bg-zinc-800/70"
>
{/* Header with avatar and user info */}
<div className="inline-flex items-center justify-start gap-4 self-stretch">
<div className="inline-flex items-center justify-start gap-1 self-stretch">
<Avatar className="h-[60px] w-[60px]">
<AvatarImage src={avatarSrc} alt="" aria-hidden="true" />
<AvatarFallback aria-hidden="true">
{userName?.charAt(0) || "U"}
</AvatarFallback>
</Avatar>
<div className="relative h-[47px] w-[173px]">
<div className="absolute left-0 top-0 font-sans text-base font-semibold leading-7 text-white dark:text-neutral-200">
<div className="relative flex h-[47px] w-[173px] flex-col items-start justify-center gap-1">
<div className="max-w-[10.5rem] truncate font-sans text-base font-semibold leading-none text-white dark:text-neutral-200">
{userName}
</div>
<div className="absolute left-0 top-[23px] font-sans text-base font-normal leading-normal text-white dark:text-neutral-400">
<div className="max-w-[10.5rem] truncate font-sans text-base font-normal leading-none text-white dark:text-neutral-400">
{userEmail}
</div>
</div>
</div>
{/* Menu items */}
<div className="flex w-full flex-col items-start justify-start gap-1.5 rounded-[23px]">
<div className="flex w-full flex-col items-start justify-start gap-2 rounded-[23px]">
{menuItemGroups.map((group, groupIndex) => (
<div
key={groupIndex}
@@ -180,4 +180,4 @@ export const ProfilePopoutMenu: React.FC<ProfilePopoutMenuProps> = ({
</PopoverContent>
</Popover>
);
};
}

View File

@@ -1,13 +1,13 @@
"use client";
import { logout } from "@/app/(platform)/login/actions";
import useSupabase from "@/lib/supabase/useSupabase";
import { IconLogOut } from "@/components/ui/icons";
export const ProfilePopoutMenuLogoutButton = () => {
const supabase = useSupabase();
return (
<div
className="inline-flex w-full items-center justify-start gap-2.5"
onClick={() => logout()}
onClick={() => supabase.logOut()}
role="button"
tabIndex={0}
>

View File

@@ -1,5 +1,3 @@
// BLOCK MENU TODO: Currently when i click on the control panel button, if it is already open, then it needs to close, currently its not happening
import React, { useCallback, useState } from "react";
import {
Popover,

View File

@@ -10,12 +10,8 @@ const BlockMenuContent: React.FC = () => {
const { searchQuery } = useBlockMenuContext();
return (
<div className="flex h-full w-full flex-col">
{/* Search Bar */}
<BlockMenuSearchBar />
<Separator className="h-[1px] w-full text-zinc-300" />
{/* Content */}
{searchQuery ? <BlockMenuSearch /> : <BlockMenuDefault />}
</div>
);

View File

@@ -15,7 +15,7 @@ const BlockMenuSearchBar: React.FC<BlockMenuSearchBarProps> = ({
}) => {
const inputRef = useRef<HTMLInputElement>(null);
const [localQuery, setLocalQuery] = useState("");
const { searchQuery, setSearchQuery, searchId, setSearchId, setFilters } =
const { setSearchQuery, searchId, setSearchId, setFilters } =
useBlockMenuContext();
const debouncedSetSearchQuery = useMemo(

View File

@@ -17,7 +17,7 @@ const ControlPanelButton: React.FC<Props> = ({
...rest
}) => {
return (
// Using div instead of button, because it's only for design purposes. We will use this to give design to PopoverTrigger.
// Using div instead of button, because it's only for design purposes. We are using this to give design to PopoverTrigger.
<div
className={cn(
"flex h-[4.25rem] w-[4.25rem] items-center justify-center whitespace-normal bg-white p-[1.38rem] text-zinc-800 shadow-none hover:cursor-pointer hover:bg-zinc-100 hover:text-zinc-950 focus:ring-0",

View File

@@ -82,6 +82,7 @@ const MarketplaceAgentBlock: MarketplaceAgentBlockComponent = ({
<Link
href={`/marketplace/agent/${creator_name}/${slug}`}
className="flex gap-0.5 truncate"
onClick={(e) => e.stopPropagation()}
>
<span className="font-sans text-xs leading-5 text-blue-700 underline">
Agent page

View File

@@ -108,7 +108,6 @@ export function BlockMenuStateProvider({
integrations: 0,
marketplace_agents: 0,
my_agents: 0,
providers: 0,
});
const [loadingSlug, setLoadingSlug] = useState<string | null>(null);

View File

@@ -70,7 +70,7 @@ const AllBlocksContent: React.FC = () => {
return (
<div className="scrollbar-thumb-rounded h-full overflow-y-auto pt-4 transition-all duration-200 scrollbar-thin scrollbar-track-transparent scrollbar-thumb-transparent hover:scrollbar-thumb-zinc-200">
<div className="w-full space-y-3 px-4 pb-4">
{[0, 1, 3].map((categoryIndex) => (
{Array.from({ length: 3 }).map((_, categoryIndex) => (
<Fragment key={categoryIndex}>
{categoryIndex > 0 && (
<Skeleton className="my-4 h-[1px] w-full text-zinc-100" />

View File

@@ -1,18 +0,0 @@
import React from "react";
import Block from "../Block";
interface BlockListSkeletonProps {
count?: number;
}
const BlockListSkeleton: React.FC<BlockListSkeletonProps> = ({ count = 3 }) => {
return (
<>
{Array.from({ length: count }).map((_, index) => (
<Block.Skeleton key={index} />
))}
</>
);
};
export default BlockListSkeleton;

View File

@@ -7,9 +7,7 @@ const BlockMenuDefault: React.FC = () => {
return (
<div className="flex flex-1 overflow-y-auto">
<BlockMenuSidebar />
<Separator className="h-full w-[1px] text-zinc-300" />
<BlockMenuDefaultContent />
</div>
);

View File

@@ -88,7 +88,7 @@ const BlockMenuSidebar: React.FC = ({}) => {
onClick={() => setDefaultState(item.type as DefaultStateType)}
/>
))}
<div className="ml-[0.5365rem] border-l border-black/10 pl-[0.75rem]">
<div className="ml-[0.5365rem] space-y-2 border-l border-black/10 pl-[0.75rem]">
{subMenuItems.map((item) => (
<MenuItem
key={item.type}

View File

@@ -42,7 +42,7 @@ const IntegrationBlocks: React.FC = ({}) => {
if (loading) {
return (
<div className="w-full space-y-3 p-4">
{[0, 1, 3].map((blockIndex) => (
{Array.from({ length: 3 }).map((_, blockIndex) => (
<Fragment key={blockIndex}>
{blockIndex > 0 && (
<Skeleton className="my-4 h-[1px] w-full text-zinc-100" />

View File

@@ -1,9 +1,7 @@
import React, { useState } from "react";
import React from "react";
import MarketplaceAgentBlock from "../MarketplaceAgentBlock";
import { usePagination } from "@/hooks/usePagination";
import ErrorState from "../ErrorState";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { convertLibraryAgentIntoBlock } from "@/lib/utils";
import { useBlockMenuContext } from "../block-menu-provider";
const MarketplaceAgentsContent: React.FC = () => {
@@ -19,9 +17,7 @@ const MarketplaceAgentsContent: React.FC = () => {
request: { apiType: "store-agents" },
pageSize: 10,
});
const api = useBackendAPI();
const { handleAddStoreAgent } = useBlockMenuContext();
const [loadingSlug, setLoadingSlug] = useState<string | null>(null);
const { handleAddStoreAgent, loadingSlug } = useBlockMenuContext();
if (loading) {
return (
@@ -30,7 +26,7 @@ const MarketplaceAgentsContent: React.FC = () => {
className="scrollbar-thumb-rounded h-full overflow-y-auto pt-4 transition-all duration-200 scrollbar-thin scrollbar-track-transparent scrollbar-thumb-transparent hover:scrollbar-thumb-zinc-200"
>
<div className="w-full space-y-3 px-4 pb-4">
{[0, 1, 2, 3, 4].map((index) => (
{Array.from({ length: 5 }).map((_, index) => (
<MarketplaceAgentBlock.Skeleton key={index} />
))}
</div>

View File

@@ -27,7 +27,7 @@ const MyAgentsContent: React.FC = () => {
className="scrollbar-thumb-rounded h-full overflow-y-auto pt-4 transition-all duration-200 scrollbar-thin scrollbar-track-transparent scrollbar-thumb-transparent hover:scrollbar-thumb-zinc-200"
>
<div className="w-full space-y-3 px-4 pb-4">
{[0, 1, 2, 3, 4].map((index) => (
{Array.from({ length: 5 }).map((_, index) => (
<UGCAgentBlock.Skeleton key={index} />
))}
</div>

View File

@@ -26,7 +26,7 @@ const PaginatedIntegrationList: React.FC = () => {
className="scrollbar-thumb-rounded h-full overflow-y-auto pt-4 transition-all duration-200 scrollbar-thin scrollbar-track-transparent scrollbar-thumb-transparent hover:scrollbar-thumb-zinc-200"
>
<div className="w-full space-y-3 px-4 pb-4">
{[0, 1, 3, 4, 5].map((integrationIndex) => (
{Array.from({ length: 6 }).map((_, integrationIndex) => (
<Integration.Skeleton key={integrationIndex} />
))}
</div>

View File

@@ -32,7 +32,6 @@ const BlockMenuSearch: React.FC = ({}) => {
}
try {
// Prepare filter array from active categories
const activeCategories = Object.entries(filters.categories)
.filter(([_, isActive]) => isActive)
.map(([category, _]) => category)
@@ -58,10 +57,8 @@ const BlockMenuSearch: React.FC = ({}) => {
setCategoryCounts(response.total_items);
if (isLoadMore) {
console.log("search list : ", response.items);
setSearchData((prev) => [...prev, ...response.items]);
} else {
console.log("initial list : ", response.items);
setSearchData(response.items);
}

View File

@@ -89,7 +89,7 @@ export default function FilterSheet({
setIsOpen(false);
}, [setFilters]);
const hasActiveFilters = useCallback(() => {
const hasLocalActiveFilters = useCallback(() => {
const hasCategoryFilter = Object.values(localFilters.categories).some(
(value) => value,
);
@@ -98,6 +98,15 @@ export default function FilterSheet({
return hasCategoryFilter || hasCreatorFilter;
}, [localFilters]);
const hasActiveFilters = useCallback(() => {
const hasCategoryFilter = Object.values(filters.categories).some(
(value) => value,
);
const hasCreatorFilter = filters.createdBy.length > 0;
return hasCategoryFilter || hasCreatorFilter;
}, [filters]);
return (
<div className="m-0 ml-4 inline w-fit p-0">
<Button
@@ -121,93 +130,100 @@ export default function FilterSheet({
<>
<div
className={cn(
"absolute bottom-2 left-2 top-2 z-20 w-3/4 max-w-[22.5rem] space-y-4 rounded-[0.75rem] bg-white py-4 shadow-[0_4px_12px_2px_rgba(0,0,0,0.1)] transition-all",
"absolute bottom-2 left-2 top-2 z-20 w-3/4 max-w-[22.5rem] space-y-4 overflow-hidden rounded-[0.75rem] bg-white pb-4 shadow-[0_4px_12px_2px_rgba(0,0,0,0.1)] transition-all",
isOpen
? "translate-x-0 duration-300 ease-out"
: "-translate-x-full duration-300 ease-out",
)}
>
{/* Top */}
<div className="flex items-center justify-between px-5">
<p className="font-sans text-base text-[#040404]">Filters</p>
<Button
variant="ghost"
size="icon"
onClick={() => setIsOpen(false)}
>
<X className="h-5 w-5" />
</Button>
</div>
<Separator className="h-[1px] w-full text-zinc-300" />
{/* Categories */}
<div className="space-y-4 px-5">
<p className="font-sans text-base font-medium text-zinc-800">
Categories
</p>
<div className="space-y-2">
{categories.map((category) => (
<div
key={category.key}
className="flex items-center space-x-2"
>
<Checkbox
id={category.key}
checked={localFilters.categories[category.key]}
onCheckedChange={() => onCategoryChange(category.key)}
className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
/>
<label
htmlFor={category.key}
className="font-sans text-sm leading-[1.375rem] text-zinc-600"
>
{category.name}
</label>
</div>
))}
</div>
</div>
<Separator className="h-[1px] w-full text-zinc-300" />
{/* Created By */}
<div className="space-y-4 px-5">
<p className="font-sans text-base font-medium text-zinc-800">
Created by
</p>
<div className="space-y-2">
{creators.map((creator) => (
<div key={creator} className="flex items-center space-x-2">
<Checkbox
id={`creator-${creator}`}
checked={localFilters.createdBy.includes(creator)}
onCheckedChange={() => onCreatorChange(creator)}
className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
/>
<label
htmlFor={`creator-${creator}`}
className="font-sans text-sm leading-[1.375rem] text-zinc-600"
>
{creator}
</label>
</div>
))}
</div>
{creators.length > 5 && (
<Button
variant={"link"}
className="m-0 p-0 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 underline hover:text-zinc-600"
>
More
</Button>
<div
className={cn(
"flex-1 space-y-4 pb-16",
"scrollbar-thumb-rounded h-full overflow-y-auto pt-4 transition-all duration-200 scrollbar-thin scrollbar-track-transparent scrollbar-thumb-transparent hover:scrollbar-thumb-zinc-200",
)}
>
{/* Top */}
<div className="flex items-center justify-between px-5">
<p className="font-sans text-base text-[#040404]">Filters</p>
<Button
variant="ghost"
size="icon"
onClick={() => setIsOpen(false)}
>
<X className="h-5 w-5" />
</Button>
</div>
<Separator className="h-[1px] w-full text-zinc-300" />
{/* Categories */}
<div className="space-y-4 px-5">
<p className="font-sans text-base font-medium text-zinc-800">
Categories
</p>
<div className="space-y-2">
{categories.map((category) => (
<div
key={category.key}
className="flex items-center space-x-2"
>
<Checkbox
id={category.key}
checked={localFilters.categories[category.key]}
onCheckedChange={() => onCategoryChange(category.key)}
className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
/>
<label
htmlFor={category.key}
className="font-sans text-sm leading-[1.375rem] text-zinc-600"
>
{category.name}
</label>
</div>
))}
</div>
</div>
<Separator className="h-[1px] w-full text-zinc-300" />
{/* Created By */}
<div className="space-y-4 px-5">
<p className="font-sans text-base font-medium text-zinc-800">
Created by
</p>
<div className="space-y-2">
{creators.map((creator) => (
<div key={creator} className="flex items-center space-x-2">
<Checkbox
id={`creator-${creator}`}
checked={localFilters.createdBy.includes(creator)}
onCheckedChange={() => onCreatorChange(creator)}
className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
/>
<label
htmlFor={`creator-${creator}`}
className="font-sans text-sm leading-[1.375rem] text-zinc-600"
>
{creator}
</label>
</div>
))}
</div>
{creators.length > 5 && (
<Button
variant={"link"}
className="m-0 p-0 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 underline hover:text-zinc-600"
>
More
</Button>
)}
</div>
</div>
{/* Footer buttons */}
<div className="absolute bottom-0 flex w-full justify-between gap-3 border-t border-zinc-300 px-5 py-3">
<div className="fixed bottom-0 flex w-full justify-between gap-3 border-t border-zinc-300 bg-white px-5 py-3">
<Button
className="min-w-[5rem] rounded-[0.5rem] border-none px-1.5 py-2 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 shadow-none ring-1 ring-zinc-400"
variant={"outline"}
@@ -221,7 +237,7 @@ export default function FilterSheet({
"min-w-[6.25rem] rounded-[0.5rem] border-none px-1.5 py-2 font-sans text-sm font-medium leading-[1.375rem] text-white shadow-none ring-1 disabled:ring-0",
)}
onClick={handleApplyFilters}
disabled={!hasActiveFilters()}
disabled={!hasLocalActiveFilters()}
>
Apply filters
</Button>

View File

@@ -1,13 +1,10 @@
import { useState, useEffect, useCallback } from "react";
import { useCallback } from "react";
import FilterChip from "../FilterChip";
import FilterSheet from "./FilterSheet";
import { CategoryKey, useBlockMenuContext } from "../block-menu-provider";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
const FiltersList = () => {
const { filters, setFilters, categoryCounts, setCategoryCounts } =
useBlockMenuContext();
const api = useBackendAPI();
const { filters, setFilters, categoryCounts } = useBlockMenuContext();
const categories: Array<{ key: CategoryKey; name: string }> = [
{ key: "blocks", name: "Blocks" },
{ key: "integrations", name: "Integrations" },
@@ -39,10 +36,6 @@ const FiltersList = () => {
[filters, setFilters],
);
useEffect(() => {
console.log(categoryCounts);
}, [categoryCounts]);
return (
<div className="flex flex-nowrap gap-3 overflow-x-auto scrollbar-hide">
<FilterSheet categories={categories} />

View File

@@ -4,7 +4,7 @@ import Block from "../Block";
import UGCAgentBlock from "../UGCAgentBlock";
import AiBlock from "./AiBlock";
import IntegrationBlock from "../IntegrationBlock";
import { SearchItem, useBlockMenuContext } from "../block-menu-provider";
import { useBlockMenuContext } from "../block-menu-provider";
import NoSearchResult from "./NoSearchResult";
import { Button } from "@/components/ui/button";
import { convertLibraryAgentIntoBlock, getBlockType } from "@/lib/utils";

View File

@@ -9,13 +9,13 @@
margin-bottom: 1rem;
}
.custom-node input:not([type="checkbox"]),
.custom-node input:not([type="checkbox"]):not([type="file"]),
.custom-node textarea,
.custom-node select,
.custom-node [data-id^="date-picker"],
.custom-node [data-list-container],
.custom-node [data-add-item],
.custom-node [data-content-settings]. .array-item-container {
.custom-node [data-content-settings] .array-item-container {
display: flex;
align-items: center;
min-width: calc(100% - 2.5rem);

View File

@@ -1,14 +1,14 @@
/* flow.css or index.css */
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto",
"Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans",
"Helvetica Neue", sans-serif;
font-family:
-apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu",
"Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
code {
font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
monospace;
font-family:
source-code-pro, Menlo, Monaco, Consolas, "Courier New", monospace;
}
.modal {

View File

@@ -1,3 +1,5 @@
import { createContext, useCallback, useEffect, useState } from "react";
import useSupabase from "@/lib/supabase/useSupabase";
import {
APIKeyCredentials,
CredentialsDeleteNeedConfirmationResponse,
@@ -8,7 +10,6 @@ import {
UserPasswordCredentials,
} from "@/lib/autogpt-server-api";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { createContext, useCallback, useEffect, useState } from "react";
// Get keys from CredentialsProviderName type
const CREDENTIALS_PROVIDER_NAMES = Object.values(
@@ -102,6 +103,7 @@ export default function CredentialsProvider({
}) {
const [providers, setProviders] =
useState<CredentialsProvidersContextType | null>(null);
const { isLoggedIn } = useSupabase();
const api = useBackendAPI();
const addCredentials = useCallback(
@@ -202,48 +204,50 @@ export default function CredentialsProvider({
);
useEffect(() => {
api.isAuthenticated().then((isAuthenticated) => {
if (!isAuthenticated) return;
if (!isLoggedIn) {
if (isLoggedIn == false) setProviders(null);
return;
}
api.listCredentials().then((response) => {
const credentialsByProvider = response.reduce(
(acc, cred) => {
if (!acc[cred.provider]) {
acc[cred.provider] = [];
}
acc[cred.provider].push(cred);
return acc;
},
{} as Record<CredentialsProviderName, CredentialsMetaResponse[]>,
);
api.listCredentials().then((response) => {
const credentialsByProvider = response.reduce(
(acc, cred) => {
if (!acc[cred.provider]) {
acc[cred.provider] = [];
}
acc[cred.provider].push(cred);
return acc;
},
{} as Record<CredentialsProviderName, CredentialsMetaResponse[]>,
);
setProviders((prev) => ({
...prev,
...Object.fromEntries(
CREDENTIALS_PROVIDER_NAMES.map((provider) => [
setProviders((prev) => ({
...prev,
...Object.fromEntries(
CREDENTIALS_PROVIDER_NAMES.map((provider) => [
provider,
{
provider,
{
provider,
providerName: providerDisplayNames[provider],
savedCredentials: credentialsByProvider[provider] ?? [],
oAuthCallback: (code: string, state_token: string) =>
oAuthCallback(provider, code, state_token),
createAPIKeyCredentials: (
credentials: APIKeyCredentialsCreatable,
) => createAPIKeyCredentials(provider, credentials),
createUserPasswordCredentials: (
credentials: UserPasswordCredentialsCreatable,
) => createUserPasswordCredentials(provider, credentials),
deleteCredentials: (id: string, force: boolean = false) =>
deleteCredentials(provider, id, force),
} satisfies CredentialsProviderData,
]),
),
}));
});
providerName: providerDisplayNames[provider],
savedCredentials: credentialsByProvider[provider] ?? [],
oAuthCallback: (code: string, state_token: string) =>
oAuthCallback(provider, code, state_token),
createAPIKeyCredentials: (
credentials: APIKeyCredentialsCreatable,
) => createAPIKeyCredentials(provider, credentials),
createUserPasswordCredentials: (
credentials: UserPasswordCredentialsCreatable,
) => createUserPasswordCredentials(provider, credentials),
deleteCredentials: (id: string, force: boolean = false) =>
deleteCredentials(provider, id, force),
} satisfies CredentialsProviderData,
]),
),
}));
});
}, [
api,
isLoggedIn,
createAPIKeyCredentials,
createUserPasswordCredentials,
deleteCredentials,

View File

@@ -493,10 +493,11 @@ export const NodeGenericInputField: FC<{
schema={propSchema as BlockIOKVSubSchema}
entries={currentValue}
errors={errors}
className={className}
displayName={displayName}
connections={connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
className={className}
displayName={displayName}
/>
);
@@ -732,6 +733,7 @@ const NodeKeyValueInput: FC<{
errors: { [key: string]: string | undefined };
connections: NodeObjectInputTreeProps["connections"];
handleInputChange: NodeObjectInputTreeProps["handleInputChange"];
handleInputClick: NodeObjectInputTreeProps["handleInputClick"];
className?: string;
displayName?: string;
}> = ({
@@ -741,6 +743,7 @@ const NodeKeyValueInput: FC<{
schema,
connections,
handleInputChange,
handleInputClick,
errors,
className,
displayName,
@@ -761,7 +764,7 @@ const NodeKeyValueInput: FC<{
}, [entries, schema.default, connections, nodeId, selfKey]);
const [keyValuePairs, setKeyValuePairs] = useState<
{ key: string; value: string | number | null }[]
{ key: string; value: any }[]
>([]);
useEffect(
@@ -778,18 +781,6 @@ const NodeKeyValueInput: FC<{
);
}
const isNumberType =
schema.additionalProperties &&
["number", "integer"].includes(schema.additionalProperties.type);
function convertValueType(value: string): string | number | null {
if (isNumberType) {
const numValue = Number(value);
return !isNaN(numValue) ? numValue : null;
}
return value;
}
function getEntryKey(key: string): string {
return `${selfKey}_#_${key}`;
}
@@ -799,6 +790,11 @@ const NodeKeyValueInput: FC<{
);
}
const propSchema =
schema.additionalProperties && schema.additionalProperties.type
? schema.additionalProperties
: ({ type: "string" } as BlockIOSimpleTypeSubSchema);
return (
<div
className={cn(className, keyValuePairs.length > 0 ? "flex flex-col" : "")}
@@ -832,18 +828,24 @@ const NodeKeyValueInput: FC<{
)
}
/>
<LocalValuedInput
type={isNumberType ? "number" : "text"}
placeholder="Value"
value={value ?? ""}
onChange={(e) =>
<NodeGenericInputField
className="w-full"
nodeId={nodeId}
propKey={`${selfKey}_#_${key}`}
propSchema={propSchema}
currentValue={value}
errors={errors}
connections={connections}
displayName={displayName || beautifyString(key)}
handleInputChange={(_, newValue) =>
updateKeyValuePairs(
keyValuePairs.toSpliced(index, 1, {
key: key,
value: convertValueType(e.target.value),
value: newValue,
}),
)
}
handleInputClick={handleInputClick}
/>
<Button
variant="ghost"

View File

@@ -1,5 +1,5 @@
"use client";
import useSupabase from "@/hooks/useSupabase";
import useSupabase from "@/lib/supabase/useSupabase";
import { OnboardingStep, UserOnboarding } from "@/lib/autogpt-server-api";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { usePathname, useRouter } from "next/navigation";

View File

@@ -0,0 +1,30 @@
/* Google Fonts - Poppins (weights: 400, 500, 600, 700) */
@import url("https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600;700&display=swap");
/* Local Geist Fonts from node_modules */
@font-face {
font-family: "Geist";
src: url("../../../node_modules/geist/dist/fonts/geist-sans/Geist-Variable.woff2")
format("woff2-variations");
font-weight: 100 900;
font-style: normal;
font-display: swap;
}
@font-face {
font-family: "GeistMono";
src: url("../../../node_modules/geist/dist/fonts/geist-mono/GeistMono-Variable.woff2")
format("woff2-variations");
font-weight: 100 900;
font-style: normal;
font-display: swap;
}
/* CSS Variables matching config from fonts.ts */
:root {
--font-poppins: "Poppins", sans-serif;
--font-geist-sans: "Geist", ui-sans-serif, system-ui, sans-serif;
--font-geist-mono:
"GeistMono", ui-monospace, "Cascadia Code", "Source Code Pro", Menlo,
Consolas, monospace;
}

View File

@@ -0,0 +1,15 @@
import { Poppins } from "next/font/google";
import { GeistSans } from "geist/font/sans";
import { GeistMono } from "geist/font/mono";
const poppins = Poppins({
subsets: ["latin"],
weight: ["400", "500", "600", "700"] as const,
variable: "--font-poppins",
});
export const fonts = {
poppins,
sans: GeistSans,
mono: GeistMono,
};

View File

@@ -1,46 +0,0 @@
import { createBrowserClient } from "@supabase/ssr";
import { User } from "@supabase/supabase-js";
import { useEffect, useMemo, useState } from "react";
/**
 * React hook exposing a browser-side Supabase client and the current auth user.
 *
 * Returns { supabase, user, isUserLoading }:
 * - supabase: the client, or null if client creation threw (e.g. missing env vars)
 * - user: the signed-in user, or null when logged out or the fetch errored
 * - isUserLoading: true until the initial getUser() request settles
 */
export default function useSupabase() {
  const [user, setUser] = useState<User | null>(null);
  const [isUserLoading, setIsUserLoading] = useState(true);
  // Memoize so the client is constructed once per component instance. The
  // NEXT_PUBLIC_* env vars are inlined at build time; a throw here (bad/missing
  // config) degrades to a null client instead of crashing the component.
  const supabase = useMemo(() => {
    try {
      return createBrowserClient(
        process.env.NEXT_PUBLIC_SUPABASE_URL!,
        process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
      );
    } catch (error) {
      console.error("Error creating Supabase client", error);
      return null;
    }
  }, []);
  useEffect(() => {
    // No client means nothing to fetch — end the loading state immediately.
    if (!supabase) {
      setIsUserLoading(false);
      return;
    }
    // One-shot fetch of the current user; runs again only if `supabase` changes.
    const fetchUser = async () => {
      const response = await supabase.auth.getUser();
      if (response.error) {
        // Display error only if it's not about missing auth session (user is not logged in)
        if (response.error.message !== "Auth session missing!") {
          console.error("Error fetching user", response.error);
        }
        setUser(null);
      } else {
        setUser(response.data.user);
      }
      setIsUserLoading(false);
    };
    fetchUser();
  }, [supabase]);
  return { supabase, user, isUserLoading };
}

View File

@@ -101,6 +101,7 @@ export default class BackendAPI {
? createBrowserClient(
process.env.NEXT_PUBLIC_SUPABASE_URL!,
process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
{ isSingleton: true },
)
: getServerSupabase();
}
@@ -108,9 +109,9 @@ export default class BackendAPI {
async isAuthenticated(): Promise<boolean> {
if (!this.supabaseClient) return false;
const {
data: { user },
} = await this.supabaseClient?.auth.getUser();
return user != null;
data: { session },
} = await this.supabaseClient.auth.getSession();
return session != null;
}
createUser(): Promise<User> {

View File

@@ -1,9 +1,10 @@
import type { UnsafeUnwrappedCookies } from "next/headers";
import { createServerClient } from "@supabase/ssr";
export default function getServerSupabase() {
// Need require here, so Next.js doesn't complain about importing this on client side
const { cookies } = require("next/headers");
const cookieStore = cookies();
const cookieStore = cookies() as UnsafeUnwrappedCookies;
try {
const supabase = createServerClient(

View File

@@ -0,0 +1,65 @@
"use client";
import { useCallback, useEffect, useMemo, useState } from "react";
import { createBrowserClient } from "@supabase/ssr";
import { SignOut, User } from "@supabase/supabase-js";
import { useRouter } from "next/navigation";
/**
 * Client-side auth hook: exposes the browser Supabase client, the current
 * user, a tri-state `isLoggedIn` flag, and a `logOut` helper.
 *
 * Returns { supabase, user, isLoggedIn, isUserLoading, logOut } where
 * isLoggedIn is null while auth state is unknown (still loading, or the
 * client failed to initialize), otherwise a boolean.
 */
export default function useSupabase() {
  const router = useRouter();
  const [user, setUser] = useState<User | null>(null);
  const [isUserLoading, setIsUserLoading] = useState(true);
  // `isSingleton: true` shares one browser client across all hook instances.
  // A throw during creation (e.g. missing env vars) degrades to a null client.
  const supabase = useMemo(() => {
    try {
      return createBrowserClient(
        process.env.NEXT_PUBLIC_SUPABASE_URL!,
        process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
        { isSingleton: true },
      );
    } catch (error) {
      console.error("Error creating Supabase client", error);
      return null;
    }
  }, []);
  useEffect(() => {
    if (!supabase) {
      setIsUserLoading(false);
      return;
    }
    // Sync up the current state and listen for changes
    const {
      data: { subscription },
    } = supabase.auth.onAuthStateChange((_event, session) => {
      setUser(session?.user ?? null);
      setIsUserLoading(false);
    });
    // Detach the auth listener when the component unmounts.
    return () => {
      subscription.unsubscribe();
    };
  }, [supabase]);
  // Signs out (default scope "local": this browser only) and redirects to
  // /login. The redirect happens even if signOut reported an error.
  const logOut = useCallback(
    async (options?: SignOut) => {
      if (!supabase) return;
      const { error } = await supabase.auth.signOut({
        scope: options?.scope ?? "local",
      });
      if (error) console.error("Error logging out:", error);
      router.push("/login");
    },
    [router, supabase],
  );
  // Narrowed return shapes: callers can rely on `user` being non-null exactly
  // when isLoggedIn === true.
  if (!supabase || isUserLoading) {
    return { supabase, user: null, isLoggedIn: null, isUserLoading, logOut };
  }
  if (!user) {
    return { supabase, user, isLoggedIn: false, isUserLoading, logOut };
  }
  return { supabase, user, isLoggedIn: true, isUserLoading, logOut };
}

View File

@@ -432,8 +432,7 @@ export const convertLibraryAgentIntoBlock = (agent: LibraryAgent) => {
// Need to change it once, we got provider blocks
export const getBlockType = (item: any) => {
console.log(item);
if (item.inputSchema.properties?.model?.title === "LLM Model") {
if (item?.inputSchema?.properties?.model?.title === "LLM Model") {
return "ai_agent";
}
if (item.id && item.name && item.inputSchema && item.outputSchema) {

View File

@@ -4,7 +4,10 @@ export class LoginPage {
constructor(private page: Page) {}
async login(email: string, password: string) {
console.log("Attempting login with:", { email, password }); // Debug log
console.log(` Attempting login on ${this.page.url()} with`, {
email,
password,
});
// Fill email
const emailInput = this.page.getByPlaceholder("m@example.com");
@@ -33,23 +36,35 @@ export class LoginPage {
});
await loginButton.waitFor({ state: "visible" });
// Start waiting for navigation before clicking
const navigationPromise = Promise.race([
this.page.waitForURL("/", { timeout: 10_000 }), // Wait for home page
this.page.waitForURL("/marketplace", { timeout: 10_000 }), // Wait for home page
this.page.waitForURL("/onboarding/**", { timeout: 10_000 }), // Wait for onboarding page
]);
// Attach navigation logger for debug purposes
this.page.on("load", (page) => console.log(` Now at URL: ${page.url()}`));
console.log("About to click login button"); // Debug log
// Start waiting for navigation before clicking
const leaveLoginPage = this.page
.waitForURL(
(url) => /^\/(marketplace|onboarding(\/.*)?)?$/.test(url.pathname),
{ timeout: 10_000 },
)
.catch((reason) => {
console.error(
`🚨 Navigation away from /login timed out (current URL: ${this.page.url()}):`,
reason,
);
throw reason;
});
console.log(`🖱️ Clicking login button...`);
await loginButton.click();
console.log("Waiting for navigation"); // Debug log
await navigationPromise;
console.log("Waiting for navigation away from /login ...");
await leaveLoginPage;
console.log(`⌛ Post-login redirected to ${this.page.url()}`);
await this.page.goto("/marketplace");
console.log("Navigation complete, waiting for network idle"); // Debug log
await new Promise((resolve) => setTimeout(resolve, 200)); // allow time for client-side redirect
await this.page.waitForLoadState("load", { timeout: 10_000 });
console.log("Login process complete"); // Debug log
console.log("➡️ Navigating to /marketplace ...");
await this.page.goto("/marketplace", { timeout: 10_000 });
console.log("✅ Login process complete");
}
}

View File

@@ -1,22 +1,19 @@
{
"compilerOptions": {
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"lib": ["DOM", "DOM.Iterable", "ESNext"],
"allowJs": false,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"target": "ES2022",
"esModuleInterop": true,
"module": "esnext",
"module": "ESNext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [
{
"name": "next"
}
],
"plugins": [{ "name": "next" }],
"paths": {
"@/*": ["./src/*"]
}

View File

@@ -1,7 +0,0 @@
module.exports = {
devServer: {
proxy: {
"/graphs": "http://localhost:8000",
},
},
};

File diff suppressed because it is too large Load Diff

View File

@@ -86,6 +86,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Unreal Text to Speech](text_to_speech_block.md#unreal-text-to-speech) | Converts text to speech using Unreal Speech API |
| [AI Shortform Video Creator](ai_shortform_video_block.md#ai-shortform-video-creator) | Generates short-form videos using AI |
| [Replicate Flux Advanced Model](replicate_flux_advanced.md#replicate-flux-advanced-model) | Creates images using Replicate's Flux models |
| [Flux Kontext](flux_kontext.md#flux-kontext) | Text-based image editing using Flux Kontext |
## Miscellaneous
| Block Name | Description |

View File

@@ -0,0 +1,31 @@
# Flux Kontext
## What it is
An internal block that performs text-based image editing using Black Forest Labs' Flux Kontext models.
## What it does
Takes a prompt describing the desired transformation and optionally a reference image, then returns a new image URL.
## How it works
The block sends your prompt, image, and settings to the selected Flux Kontext model on Replicate. The service processes the request and returns a link to the edited image.
## Inputs
| Input | Description |
|--------------|-----------------------------------------------------------------------------|
| Credentials | Replicate API key with permissions for Flux Kontext models |
| Prompt | Text instruction describing the desired edit |
| Input Image | (Optional) Reference image URI (jpeg, png, gif, webp) |
| Aspect Ratio | Aspect ratio of the generated image (e.g. match_input_image, 1:1, 16:9, etc.) |
| Seed | (Optional, advanced) Random seed for reproducible generation |
| Model | Model variant to use: Flux Kontext Pro or Flux Kontext Max |
## Outputs
| Output | Description |
|------------|------------------------------------------|
| image_url | URL of the transformed image |
| error | Error message if generation failed |
## Use Cases
- Enhance a marketing image by requesting "add soft lighting and a subtle vignette" while providing the original asset as the reference image.
- Generate social media assets with specific aspect ratios and style prompts.
- Apply creative edits to product photos using text instructions.

View File

@@ -13,19 +13,19 @@ To run the tests, you can use the following commands:
Running the tests without the UI, and headless:
```bash
yarn test
pnpm test
```
If you want to run the tests in a UI where you can identify each locator used you can use the following command:
```bash
yarn test-ui
pnpm test-ui
```
You can also pass `--debug` to the test command to open the browsers in view mode rather than headless. This works with both the `yarn test` and `yarn test-ui` commands.
You can also pass `--debug` to the test command to open the browsers in view mode rather than headless. This works with both the `pnpm test` and `pnpm test-ui` commands.
```bash
yarn test --debug
pnpm test --debug
```
In CI, we run the tests in headless mode, with multiple browsers, and retry a failed test up to 2 times.
@@ -45,7 +45,7 @@ No matter what you do, you should **always** double check that your locators are
If you need to debug a test, you can use the below command to open the test in the playwright test editor. This is helpful if you want to see the test in the browser and see the state of the page as the test sees it and the locators it uses.
```bash
yarn test --debug --test-name-pattern="test-name"
pnpm test --debug --test-name-pattern="test-name"
```
#### Using vscode
@@ -64,7 +64,7 @@ This will save a file called `.auth/gentest-user.json` that can be loaded for al
### Saving a session for gen tests to always use
```bash
yarn gentests --save-storage .auth/gentest-user.json
pnpm gentests --save-storage .auth/gentest-user.json
```
Stop your session with `CTRL + C` after you are logged in and swap the `--save-storage` flag with `--load-storage` to load the session for all future tests.
@@ -72,7 +72,7 @@ Stop your session with `CTRL + C` after you are logged in and swap the `--save-s
### Loading a session for gen tests to always use
```bash
yarn gentests --load-storage .auth/gentest-user.json
pnpm gentests --load-storage .auth/gentest-user.json
```
## How to make a new test

View File

@@ -6,7 +6,7 @@ This guide will help you setup the server and builder for the project.
<!-- The video is listed in the root Readme.md of the repo -->
We also offer this in video format. You can check it out [here](https://github.com/Significant-Gravitas/AutoGPT?tab=readme-ov-file#how-to-setup-for-self-hosting).
<!--We also offer this in video format. You can check it out [here](https://github.com/Significant-Gravitas/AutoGPT?tab=readme-ov-file#how-to-setup-for-self-hosting). -->
!!! warning
**DO NOT FOLLOW ANY OUTSIDE TUTORIALS AS THEY WILL LIKELY BE OUT OF DATE**
@@ -117,23 +117,27 @@ To run the backend services, follow these steps:
To run the frontend application open a new terminal and follow these steps:
* Navigate to `frontend` folder within the `autogpt_platform` directory:
- Navigate to `frontend` folder within the `autogpt_platform` directory:
```
cd frontend
```
* Copy the `.env.example` file available in the `frontend` directory to `.env` in the same directory:
- Copy the `.env.example` file available in the `frontend` directory to `.env` in the same directory:
```
cp .env.example .env
```
You can modify the `.env` within this folder to add your own environment variables for the frontend application.
* Run the following command:
- Run the following command:
```
npm install
npm run dev
corepack enable
pnpm install
pnpm dev
```
This command will install the necessary dependencies and start the frontend application in development mode.
This command will enable corepack, install the necessary dependencies with pnpm, and start the frontend application in development mode.
### Checking if the application is running

View File

@@ -12,7 +12,9 @@ Follow these steps to set up and run Ollama with the AutoGPT platform.
## Setup Steps
### 1. Launch Ollama
Open a new terminal and execute:
```bash
ollama run llama3.2
```
@@ -20,17 +22,23 @@ ollama run llama3.2
> **Note**: This will download the [llama3.2](https://ollama.com/library/llama3.2) model and start the service. Keep this terminal running in the background.
### 2. Start the Backend
Open a new terminal and navigate to the autogpt_platform directory:
```bash
cd autogpt_platform
docker compose up -d --build
```
### 3. Start the Frontend
Open a new terminal and navigate to the frontend directory:
```bash
cd autogpt_platform/frontend
npm run dev
corepack enable
pnpm i
pnpm dev
```
Then visit [http://localhost:3000](http://localhost:3000) to see the frontend running, after registering an account/logging in, navigate to the build page at [http://localhost:3000/build](http://localhost:3000/build)
@@ -46,13 +54,13 @@ Now that both Ollama and the AutoGPT platform are running we can move onto using
![Select Ollama Model](../imgs/ollama/Ollama-Select-Llama32.png)
3. Now we need to add some prompts then save and then run the graph:
![Add Prompt](../imgs/ollama/Ollama-Add-Prompts.png)
![Add Prompt](../imgs/ollama/Ollama-Add-Prompts.png)
That's it! You've successfully setup the AutoGPT platform and made a LLM call to Ollama.
![Ollama Output](../imgs/ollama/Ollama-Output.png)
### Using Ollama on a Remote Server with AutoGPT
### Using Ollama on a Remote Server with AutoGPT
For running Ollama on a remote server, simply make sure the Ollama server is running and is accessible from other devices on your network/remotely through the port 11434, then you can use the same steps above but you need to add the Ollama server's IP address to the "Ollama Host" field in the block settings like so:
![Ollama Remote Host](../imgs/ollama/Ollama-Remote-Host.png)
@@ -69,4 +77,4 @@ For common errors:
1. **Connection Refused**: Make sure Ollama is running and the host address is correct (also make sure the port is correct; its default is 11434)
2. **Model Not Found**: Try running `ollama pull llama3.2` manually first
3. **Docker Issues**: Ensure Docker daemon is running with `docker ps`
3. **Docker Issues**: Ensure Docker daemon is running with `docker ps`