Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-02-12 15:55:03 -05:00

Compare commits: fix/copilo… ... pwuts/spee… (25 commits)

Commit SHAs:
7e855806e3, adc46ec2e8, 9d12807294, 4ac40f11e5, f5cdb02a38,
bd9ff05eaa, 66be27f6da, 39b821da94, 393d6aa5ac, f753058e8f,
7cdbbdd65e, 6191ac0b1e, b51e87bc53, 71f764f3d0, a78145505b,
36aeb0b2b3, 2a189c44c4, 508759610f, 062fe1aa70, 2cd0d4fe0f,
1ecae8c87e, 659338f90c, 4df5b7bde7, 017a00af46, 52650eed1d
.dockerignore
@@ -5,42 +5,13 @@
 !docs/

 # Platform - Libs
-!autogpt_platform/autogpt_libs/autogpt_libs/
-!autogpt_platform/autogpt_libs/pyproject.toml
-!autogpt_platform/autogpt_libs/poetry.lock
-!autogpt_platform/autogpt_libs/README.md
+!autogpt_platform/autogpt_libs/

 # Platform - Backend
-!autogpt_platform/backend/backend/
-!autogpt_platform/backend/test/e2e_test_data.py
-!autogpt_platform/backend/migrations/
-!autogpt_platform/backend/schema.prisma
-!autogpt_platform/backend/pyproject.toml
-!autogpt_platform/backend/poetry.lock
-!autogpt_platform/backend/README.md
-!autogpt_platform/backend/.env
-!autogpt_platform/backend/gen_prisma_types_stub.py
-
-# Platform - Market
-!autogpt_platform/market/market/
-!autogpt_platform/market/scripts.py
-!autogpt_platform/market/schema.prisma
-!autogpt_platform/market/pyproject.toml
-!autogpt_platform/market/poetry.lock
-!autogpt_platform/market/README.md
+!autogpt_platform/backend/

 # Platform - Frontend
-!autogpt_platform/frontend/src/
-!autogpt_platform/frontend/public/
-!autogpt_platform/frontend/scripts/
-!autogpt_platform/frontend/package.json
-!autogpt_platform/frontend/pnpm-lock.yaml
-!autogpt_platform/frontend/tsconfig.json
-!autogpt_platform/frontend/README.md
-## config
-!autogpt_platform/frontend/*.config.*
-!autogpt_platform/frontend/.env.*
-!autogpt_platform/frontend/.env
+!autogpt_platform/frontend/

 # Classic - AutoGPT
 !classic/original_autogpt/autogpt/
@@ -64,6 +35,37 @@
 # Classic - Frontend
 !classic/frontend/build/web/

-# Explicitly re-ignore some folders
-.*
-**/__pycache__
+# Explicitly re-ignore unwanted files from whitelisted directories
+# Note: These patterns MUST come after the whitelist rules to take effect
+
+# Hidden files and directories (but keep frontend .env files needed for build)
+**/.*
+!autogpt_platform/frontend/.env
+!autogpt_platform/frontend/.env.default
+!autogpt_platform/frontend/.env.production
+
+# Python artifacts
+**/__pycache__/
+**/*.pyc
+**/*.pyo
+**/.venv/
+**/.ruff_cache/
+**/.pytest_cache/
+**/.coverage
+**/htmlcov/
+
+# Node artifacts
+**/node_modules/
+**/.next/
+**/storybook-static/
+**/playwright-report/
+**/test-results/
+
+# Build artifacts
+**/dist/
+**/build/
+**/target/
+
+# Logs and temp files
+**/*.log
+**/*.tmp
@@ -22,7 +22,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          ref: ${{ github.event.workflow_run.head_branch }}
          fetch-depth: 0
.github/workflows/claude-dependabot.yml (2 changes, vendored)
@@ -30,7 +30,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/claude.yml (2 changes, vendored)
@@ -40,7 +40,7 @@ jobs:
       actions: read # Required for CI access
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/codeql.yml (2 changes, vendored)
@@ -58,7 +58,7 @@ jobs:
    # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
.github/workflows/copilot-setup-steps.yml (2 changes, vendored)
@@ -27,7 +27,7 @@ jobs:
    # If you do not check out your code, Copilot will do this for you.
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          fetch-depth: 0
          submodules: true
.github/workflows/docs-block-sync.yml (2 changes, vendored)
@@ -23,7 +23,7 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/docs-claude-review.yml (2 changes, vendored)
@@ -23,7 +23,7 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          fetch-depth: 0
.github/workflows/docs-enhance.yml (2 changes, vendored)
@@ -28,7 +28,7 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          fetch-depth: 1
@@ -25,7 +25,7 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          ref: ${{ github.event.inputs.git_ref || github.ref_name }}
@@ -52,7 +52,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
-       uses: peter-evans/repository-dispatch@v3
+       uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -17,7 +17,7 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          ref: ${{ github.ref_name || 'master' }}
@@ -45,7 +45,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
-       uses: peter-evans/repository-dispatch@v3
+       uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
.github/workflows/platform-backend-ci.yml (2 changes, vendored)
@@ -68,7 +68,7 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          fetch-depth: 0
          submodules: true
@@ -82,7 +82,7 @@
      - name: Dispatch Deploy Event
        if: steps.check_status.outputs.should_deploy == 'true'
-       uses: peter-evans/repository-dispatch@v3
+       uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@
      - name: Dispatch Undeploy Event (from comment)
        if: steps.check_status.outputs.should_undeploy == 'true'
-       uses: peter-evans/repository-dispatch@v3
+       uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
-       uses: peter-evans/repository-dispatch@v3
+       uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
.github/workflows/platform-frontend-ci.yml (189 changes, vendored)
@@ -26,12 +26,11 @@ jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
-     cache-key: ${{ steps.cache-key.outputs.key }}
      components-changed: ${{ steps.filter.outputs.components }}

    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

      - name: Check for component changes
        uses: dorny/paths-filter@v3
@@ -41,28 +40,17 @@ jobs:
          components:
            - 'autogpt_platform/frontend/src/components/**'

-     - name: Set up Node.js
-       uses: actions/setup-node@v6
-       with:
-         node-version: "22.18.0"
-
      - name: Enable corepack
        run: corepack enable

-     - name: Generate cache key
-       id: cache-key
-       run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
-
-     - name: Cache dependencies
-       uses: actions/cache@v5
+     - name: Set up Node
+       uses: actions/setup-node@v6
        with:
-         path: ~/.pnpm-store
-         key: ${{ steps.cache-key.outputs.key }}
-         restore-keys: |
-           ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-           ${{ runner.os }}-pnpm-
+         node-version: "22.18.0"
+         cache: "pnpm"
+         cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

-     - name: Install dependencies
+     - name: Install dependencies to populate cache
        run: pnpm install --frozen-lockfile

  lint:
@@ -71,24 +59,17 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

-     - name: Set up Node.js
-       uses: actions/setup-node@v6
-       with:
-         node-version: "22.18.0"
-
      - name: Enable corepack
        run: corepack enable

-     - name: Restore dependencies cache
-       uses: actions/cache@v5
+     - name: Set up Node
+       uses: actions/setup-node@v6
        with:
-         path: ~/.pnpm-store
-         key: ${{ needs.setup.outputs.cache-key }}
-         restore-keys: |
-           ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-           ${{ runner.os }}-pnpm-
+         node-version: "22.18.0"
+         cache: "pnpm"
+         cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
        run: pnpm install --frozen-lockfile
@@ -107,26 +88,19 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          fetch-depth: 0

-     - name: Set up Node.js
-       uses: actions/setup-node@v6
-       with:
-         node-version: "22.18.0"
-
      - name: Enable corepack
        run: corepack enable

-     - name: Restore dependencies cache
-       uses: actions/cache@v5
+     - name: Set up Node
+       uses: actions/setup-node@v6
        with:
-         path: ~/.pnpm-store
-         key: ${{ needs.setup.outputs.cache-key }}
-         restore-keys: |
-           ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-           ${{ runner.os }}-pnpm-
+         node-version: "22.18.0"
+         cache: "pnpm"
+         cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
        run: pnpm install --frozen-lockfile
@@ -141,30 +115,20 @@ jobs:
          exitOnceUploaded: true

  e2e_test:
+   name: end-to-end tests
    runs-on: big-boi
-   needs: setup
-   strategy:
-     fail-fast: false

    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          submodules: recursive

-     - name: Set up Node.js
-       uses: actions/setup-node@v6
-       with:
-         node-version: "22.18.0"
-
-     - name: Enable corepack
-       run: corepack enable
-
-     - name: Copy default supabase .env
+     - name: Set up Platform - Copy default supabase .env
        run: |
          cp ../.env.default ../.env

-     - name: Copy backend .env and set OpenAI API key
+     - name: Set up Platform - Copy backend .env and set OpenAI API key
        run: |
          cp ../backend/.env.default ../backend/.env
          echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -172,77 +136,87 @@ jobs:
          # Used by E2E test data script to generate embeddings for approved store agents
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

-     - name: Set up Docker Buildx
+     - name: Set up Platform - Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
+       with:
+         driver: docker-container
+         driver-opts: network=host

-     - name: Cache Docker layers
-       uses: actions/cache@v5
-       with:
-         path: /tmp/.buildx-cache
-         key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
-         restore-keys: |
-           ${{ runner.os }}-buildx-frontend-test-
+     - name: Set up Platform - Expose GHA cache to docker buildx CLI
+       uses: crazy-max/ghaction-github-runtime@v3

-     - name: Run docker compose
-       run: |
-         NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
-       env:
-         DOCKER_BUILDKIT: 1
-         BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
-         BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
+     - name: Set up Platform - Build Docker images (with cache)
+       working-directory: autogpt_platform
+       run: |
+         pip install pyyaml
+
+         # Resolve extends and generate a flat compose file that bake can understand
+         docker compose -f docker-compose.yml config > docker-compose.resolved.yml
+
+         # Add cache configuration to the resolved compose file
+         python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
+           --source docker-compose.resolved.yml \
+           --cache-from "type=gha" \
+           --cache-to "type=gha,mode=max" \
+           --backend-scope "platform-backend-${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
+           --frontend-scope "platform-frontend-${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}"
+
+         # Build with bake using the resolved compose file (now includes cache config)
+         docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
+       env:
+         NEXT_PUBLIC_PW_TEST: true

-     - name: Move cache
-       run: |
-         rm -rf /tmp/.buildx-cache
-         if [ -d "/tmp/.buildx-cache-new" ]; then
-           mv /tmp/.buildx-cache-new /tmp/.buildx-cache
-         fi
+     - name: Set up Platform - Run (docker compose up)
+       run: docker compose -f ../docker-compose.resolved.yml up -d --no-build
+       env:
+         NEXT_PUBLIC_PW_TEST: true

-     - name: Wait for services to be ready
+     - name: Set up Platform - Wait for services to be ready
        run: |
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
-         timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
+         timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

-     - name: Create E2E test data
+     - name: Set up tests - Create E2E test data
        run: |
          echo "Creating E2E test data..."
          # First try to run the script from inside the container
-         if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
+         if docker compose -f ../docker-compose.resolved.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
            echo "✅ Found e2e_test_data.py in container, running it..."
-           docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
+           docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
-             docker compose -f ../docker-compose.yml logs --tail=50 rest_server
+             docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
              exit 1
            }
          else
            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
            # Copy the script into the container and run it
-           docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
+           docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py || {
              echo "❌ Failed to copy script to container"
              exit 1
            }
-           docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
+           docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
-             docker compose -f ../docker-compose.yml logs --tail=50 rest_server
+             docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
              exit 1
            }
          fi

-     - name: Restore dependencies cache
-       uses: actions/cache@v5
-       with:
-         path: ~/.pnpm-store
-         key: ${{ needs.setup.outputs.cache-key }}
-         restore-keys: |
-           ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-           ${{ runner.os }}-pnpm-
+     - name: Set up tests - Enable corepack
+       run: corepack enable

-     - name: Install dependencies
+     - name: Set up tests - Set up Node
+       uses: actions/setup-node@v6
+       with:
+         node-version: "22.18.0"
+         cache: "pnpm"
+         cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+     - name: Set up tests - Install dependencies
        run: pnpm install --frozen-lockfile

-     - name: Install Browser 'chromium'
+     - name: Set up tests - Install browser 'chromium'
        run: pnpm playwright install --with-deps chromium

      - name: Run Playwright tests
@@ -269,7 +243,7 @@ jobs:
      - name: Print Final Docker Compose logs
        if: always()
-       run: docker compose -f ../docker-compose.yml logs
+       run: docker compose -f ../docker-compose.resolved.yml logs

  integration_test:
    runs-on: ubuntu-latest
@@ -277,26 +251,19 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          submodules: recursive

-     - name: Set up Node.js
-       uses: actions/setup-node@v6
-       with:
-         node-version: "22.18.0"
-
      - name: Enable corepack
        run: corepack enable

-     - name: Restore dependencies cache
-       uses: actions/cache@v5
+     - name: Set up Node
+       uses: actions/setup-node@v6
        with:
-         path: ~/.pnpm-store
-         key: ${{ needs.setup.outputs.cache-key }}
-         restore-keys: |
-           ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-           ${{ runner.os }}-pnpm-
+         node-version: "22.18.0"
+         cache: "pnpm"
+         cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
        run: pnpm install --frozen-lockfile
.github/workflows/platform-fullstack-ci.yml (4 changes, vendored)
@@ -29,7 +29,7 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

      - name: Set up Node.js
        uses: actions/setup-node@v6
@@ -63,7 +63,7 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          submodules: recursive
.github/workflows/repo-workflow-checker.yml (2 changes, vendored)
@@ -11,7 +11,7 @@ jobs:
    steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v6
      # with:
      #   fetch-depth: 0
      - name: Set up Python
.github/workflows/scripts/docker-ci-fix-compose-build-cache.py (new file, 154 lines, vendored)
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+"""
+Add cache configuration to a resolved docker-compose file for all services
+that have a build key, and ensure image names match what docker compose expects.
+"""
+
+import argparse
+
+import yaml
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Add cache config to a resolved compose file"
+    )
+    parser.add_argument(
+        "--source",
+        required=True,
+        help="Source compose file to read (should be output of `docker compose config`)",
+    )
+    parser.add_argument(
+        "--cache-from",
+        default="type=gha",
+        help="Cache source configuration",
+    )
+    parser.add_argument(
+        "--cache-to",
+        default="type=gha,mode=max",
+        help="Cache destination configuration",
+    )
+    parser.add_argument(
+        "--backend-scope",
+        default="",
+        help="GHA cache scope for backend services (e.g., platform-backend-{hash})",
+    )
+    parser.add_argument(
+        "--frontend-scope",
+        default="",
+        help="GHA cache scope for frontend service (e.g., platform-frontend-{hash})",
+    )
+    args = parser.parse_args()
+
+    with open(args.source, "r") as f:
+        compose = yaml.safe_load(f)
+
+    # Get project name from compose file or default
+    project_name = compose.get("name", "autogpt_platform")
+
+    def get_image_name(dockerfile: str, target: str) -> str:
+        """Generate image name based on Dockerfile folder and build target."""
+        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
+        if len(dockerfile_parts) >= 2:
+            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
+        else:
+            folder_name = "app"
+        return f"{project_name}-{folder_name}:{target}"
+
+    def get_build_key(dockerfile: str, target: str) -> str:
+        """Generate a unique key for a Dockerfile+target combination."""
+        return f"{dockerfile}:{target}"
+
+    # First pass: collect all services with build configs and identify duplicates
+    # Track which (dockerfile, target) combinations we've seen
+    build_key_to_first_service: dict[str, str] = {}
+    services_to_build: list[str] = []
+    services_to_dedupe: list[str] = []
+
+    for service_name, service_config in compose.get("services", {}).items():
+        if "build" not in service_config:
+            continue
+
+        build_config = service_config["build"]
+        dockerfile = build_config.get("dockerfile", "Dockerfile")
+        target = build_config.get("target", "default")
+        build_key = get_build_key(dockerfile, target)
+
+        if build_key not in build_key_to_first_service:
+            # First service with this build config - it will do the actual build
+            build_key_to_first_service[build_key] = service_name
+            services_to_build.append(service_name)
+        else:
+            # Duplicate - will just use the image from the first service
+            services_to_dedupe.append(service_name)
+
+    # Second pass: configure builds and deduplicate
+    modified_services = []
+    for service_name, service_config in compose.get("services", {}).items():
+        if "build" not in service_config:
+            continue
+
+        build_config = service_config["build"]
+        dockerfile = build_config.get("dockerfile", "Dockerfile")
+        target = build_config.get("target", "latest")
+        image_name = get_image_name(dockerfile, target)
+
+        # Set image name for all services (needed for both builders and deduped)
+        service_config["image"] = image_name
+
+        if service_name in services_to_dedupe:
+            # Remove build config - this service will use the pre-built image
+            del service_config["build"]
+            continue
+
+        # This service will do the actual build - add cache config
+        cache_from = args.cache_from
+        cache_to = args.cache_to
+
+        # Determine scope based on Dockerfile path and target
+        # Each unique (dockerfile, target) combination gets its own cache scope
+        if "type=gha" in args.cache_from or "type=gha" in args.cache_to:
+            if "frontend" in dockerfile:
+                base_scope = args.frontend_scope
+            elif "backend" in dockerfile:
+                base_scope = args.backend_scope
+            else:
+                # Skip services that don't clearly match frontend/backend
+                continue
+
+            if base_scope:
+                # Append target to scope to differentiate e.g. migrate vs server
+                scope = f"{base_scope}-{target}"
+                if "type=gha" in args.cache_from:
+                    cache_from = f"{args.cache_from},scope={scope}"
+                if "type=gha" in args.cache_to:
+                    cache_to = f"{args.cache_to},scope={scope}"
+
+        build_config["cache_from"] = [cache_from]
+        build_config["cache_to"] = [cache_to]
+        modified_services.append(service_name)
+
+    # Write back to the same file
+    with open(args.source, "w") as f:
+        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)
+
+    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
+    for svc in modified_services:
+        svc_config = compose["services"][svc]
+        build_cfg = svc_config.get("build", {})
+        cache_from_val = build_cfg.get("cache_from", ["none"])[0]
+        cache_to_val = build_cfg.get("cache_to", ["none"])[0]
+        print(f"  - {svc}")
+        print(f"    image: {svc_config.get('image', 'N/A')}")
+        print(f"    cache_from: {cache_from_val}")
+        print(f"    cache_to: {cache_to_val}")
+    if services_to_dedupe:
+        print(
+            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
+        )
+        for svc in services_to_dedupe:
+            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")
+
+
+if __name__ == "__main__":
+    main()
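To make the script's effect concrete, here is a minimal sketch of the transformation it applies to one service of a resolved compose file. The service name, the "abc123" hash, and the scope value below are illustrative placeholders, not taken from the real CI run:

import yaml

# Toy resolved compose mapping; the real input is `docker compose config` output.
resolved = yaml.safe_load(
    """
name: autogpt_platform
services:
  rest_server:
    build:
      context: ..
      dockerfile: backend/Dockerfile
      target: server
"""
)

service = resolved["services"]["rest_server"]
scope = "platform-backend-abc123-server"  # hypothetical hashFiles()-derived scope

# The script pins a deterministic image name and injects GHA cache hints
# that `docker buildx bake` understands.
service["image"] = "autogpt_platform-backend:server"
service["build"]["cache_from"] = [f"type=gha,scope={scope}"]
service["build"]["cache_to"] = [f"type=gha,mode=max,scope={scope}"]

print(yaml.dump(resolved, default_flow_style=False, sort_keys=False))

Services that share a (dockerfile, target) pair keep only the image line and lose their build key, so every image is built exactly once and reused by its duplicates.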
autogpt_platform/backend/Dockerfile
@@ -1,3 +1,5 @@
+# ============================ DEPENDENCY BUILDER ============================ #
+
 FROM debian:13-slim AS builder

 # Set environment variables
@@ -51,7 +53,9 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 RUN poetry run prisma generate && poetry run gen-prisma-stub

-FROM debian:13-slim AS server_dependencies
+# ============================== BACKEND SERVER ============================== #
+
+FROM debian:13-slim AS server

 WORKDIR /app
@@ -63,15 +67,14 @@ ENV POETRY_HOME=/opt/poetry \
 ENV PATH=/opt/poetry/bin:$PATH

 # Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
-RUN apt-get update && apt-get install -y \
+# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
+RUN apt-get update && apt-get install -y --no-install-recommends \
     python3.13 \
     python3-pip \
     ffmpeg \
     imagemagick \
     && rm -rf /var/lib/apt/lists/*

-# Copy only necessary files from builder
-COPY --from=builder /app /app
 COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
 COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
 # Copy Node.js installation for Prisma
@@ -81,30 +84,54 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
 COPY --from=builder /usr/bin/npx /usr/bin/npx
 COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

-ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
-
-RUN mkdir -p /app/autogpt_platform/autogpt_libs
-RUN mkdir -p /app/autogpt_platform/backend
-
-COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
-
-COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
-
 WORKDIR /app/autogpt_platform/backend

-FROM server_dependencies AS migrate
+# Copy only the .venv from builder (not the entire /app directory)
+# The .venv includes the generated Prisma client
+COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
+ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

-# Migration stage only needs schema and migrations - much lighter than full backend
-COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
-COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
-COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations
+# Copy dependency files + autogpt_libs (path dependency)
+COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
+COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./

-FROM server_dependencies AS server
-COPY autogpt_platform/backend /app/autogpt_platform/backend
+# Copy backend code + docs (for Copilot docs search)
+COPY autogpt_platform/backend ./
 COPY docs /app/docs
 RUN poetry install --no-ansi --only-root

 ENV PORT=8000

 CMD ["poetry", "run", "rest"]
+
+# =============================== DB MIGRATOR =============================== #
+
+# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
+FROM debian:13-slim AS migrate
+
+WORKDIR /app/autogpt_platform/backend
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3.13 \
+    python3-pip \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy Node.js from builder (needed for Prisma CLI)
+COPY --from=builder /usr/bin/node /usr/bin/node
+COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
+COPY --from=builder /usr/bin/npm /usr/bin/npm
+
+# Copy Prisma binaries
+COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
+
+# Install prisma-client-py directly (much smaller than copying full venv)
+RUN pip3 install prisma>=0.15.0 --break-system-packages
+
+COPY autogpt_platform/backend/schema.prisma ./
+COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
+COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
+COPY autogpt_platform/backend/migrations ./migrations
@@ -96,13 +96,7 @@ class ChatConfig(BaseSettings):
     # Extended thinking configuration for Claude models
     thinking_enabled: bool = Field(
         default=True,
-        description="Enable extended thinking for Claude models via OpenRouter",
-    )
-    thinking_budget_tokens: int = Field(
-        default=10000,
-        ge=1000,
-        le=100000,
-        description="Maximum tokens for extended thinking (budget_tokens for Claude)",
+        description="Enable adaptive thinking for Claude models via OpenRouter",
     )

     @field_validator("api_key", mode="before")
@@ -2,7 +2,7 @@ import asyncio
 import logging
 import uuid
 from datetime import UTC, datetime
-from typing import Any
+from typing import Any, cast
 from weakref import WeakValueDictionary

 from openai.types.chat import (
@@ -104,6 +104,26 @@ class ChatSession(BaseModel):
     successful_agent_runs: dict[str, int] = {}
     successful_agent_schedules: dict[str, int] = {}

+    def add_tool_call_to_current_turn(self, tool_call: dict) -> None:
+        """Attach a tool_call to the current turn's assistant message.
+
+        Searches backwards for the most recent assistant message (stopping at
+        any user message boundary). If found, appends the tool_call to it.
+        Otherwise creates a new assistant message with the tool_call.
+        """
+        for msg in reversed(self.messages):
+            if msg.role == "user":
+                break
+            if msg.role == "assistant":
+                if not msg.tool_calls:
+                    msg.tool_calls = []
+                msg.tool_calls.append(tool_call)
+                return
+
+        self.messages.append(
+            ChatMessage(role="assistant", content="", tool_calls=[tool_call])
+        )
+
     @staticmethod
     def new(user_id: str) -> "ChatSession":
         return ChatSession(
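A minimal usage sketch of the turn-boundary behavior (the tool name and arguments are made up; the assertions mirror the tests added later in this change set):

session = ChatSession.new(user_id="u1")
session.messages = [
    ChatMessage(role="user", content="run the report"),
    ChatMessage(role="assistant", content="Starting the tool..."),
]

tool_call = {
    "id": "tc1",
    "type": "function",
    "function": {"name": "run_report", "arguments": "{}"},  # hypothetical tool
}
session.add_tool_call_to_current_turn(tool_call)

assert len(session.messages) == 2            # no new message created
assert session.messages[1].tool_calls == [tool_call]  # attached to the current turn

Had the last message been a user message, the search would have stopped at that boundary and a fresh assistant message carrying the tool_call would have been appended instead.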
@@ -172,6 +192,47 @@ class ChatSession(BaseModel):
             successful_agent_schedules=successful_agent_schedules,
         )

+    @staticmethod
+    def _merge_consecutive_assistant_messages(
+        messages: list[ChatCompletionMessageParam],
+    ) -> list[ChatCompletionMessageParam]:
+        """Merge consecutive assistant messages into single messages.
+
+        Long-running tool flows can create split assistant messages: one with
+        text content and another with tool_calls. Anthropic's API requires
+        tool_result blocks to reference a tool_use in the immediately preceding
+        assistant message, so these splits cause 400 errors via OpenRouter.
+        """
+        if len(messages) < 2:
+            return messages
+
+        result: list[ChatCompletionMessageParam] = [messages[0]]
+        for msg in messages[1:]:
+            prev = result[-1]
+            if prev.get("role") != "assistant" or msg.get("role") != "assistant":
+                result.append(msg)
+                continue
+
+            prev = cast(ChatCompletionAssistantMessageParam, prev)
+            curr = cast(ChatCompletionAssistantMessageParam, msg)
+
+            curr_content = curr.get("content") or ""
+            if curr_content:
+                prev_content = prev.get("content") or ""
+                prev["content"] = (
+                    f"{prev_content}\n{curr_content}" if prev_content else curr_content
+                )
+
+            curr_tool_calls = curr.get("tool_calls")
+            if curr_tool_calls:
+                prev_tool_calls = prev.get("tool_calls")
+                prev["tool_calls"] = (
+                    list(prev_tool_calls) + list(curr_tool_calls)
+                    if prev_tool_calls
+                    else list(curr_tool_calls)
+                )
+        return result
+
     def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
         messages = []
         for message in self.messages:
@@ -258,7 +319,7 @@
                     name=message.name or "",
                 )
             )
-        return messages
+        return self._merge_consecutive_assistant_messages(messages)


 async def _get_session_from_cache(session_id: str) -> ChatSession | None:
@@ -1,4 +1,16 @@
+from typing import cast
+
 import pytest
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionUserMessageParam,
+)
+from openai.types.chat.chat_completion_message_tool_call_param import (
+    ChatCompletionMessageToolCallParam,
+    Function,
+)

 from .model import (
     ChatMessage,
@@ -117,3 +129,205 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
             loaded.tool_calls is not None
         ), f"Tool calls missing for {orig.role} message"
         assert len(orig.tool_calls) == len(loaded.tool_calls)
+
+
+# --------------------------------------------------------------------------- #
+#                  _merge_consecutive_assistant_messages                       #
+# --------------------------------------------------------------------------- #
+
+_tc = ChatCompletionMessageToolCallParam(
+    id="tc1", type="function", function=Function(name="do_stuff", arguments="{}")
+)
+_tc2 = ChatCompletionMessageToolCallParam(
+    id="tc2", type="function", function=Function(name="other", arguments="{}")
+)
+
+
+def test_merge_noop_when_no_consecutive_assistants():
+    """Messages without consecutive assistants are returned unchanged."""
+    msgs = [
+        ChatCompletionUserMessageParam(role="user", content="hi"),
+        ChatCompletionAssistantMessageParam(role="assistant", content="hello"),
+        ChatCompletionUserMessageParam(role="user", content="bye"),
+    ]
+    merged = ChatSession._merge_consecutive_assistant_messages(msgs)
+    assert len(merged) == 3
+    assert [m["role"] for m in merged] == ["user", "assistant", "user"]
+
+
+def test_merge_splits_text_and_tool_calls():
+    """The exact bug scenario: text-only assistant followed by tool_calls-only assistant."""
+    msgs = [
+        ChatCompletionUserMessageParam(role="user", content="build agent"),
+        ChatCompletionAssistantMessageParam(
+            role="assistant", content="Let me build that"
+        ),
+        ChatCompletionAssistantMessageParam(
+            role="assistant", content="", tool_calls=[_tc]
+        ),
+        ChatCompletionToolMessageParam(role="tool", content="ok", tool_call_id="tc1"),
+    ]
+    merged = ChatSession._merge_consecutive_assistant_messages(msgs)
+
+    assert len(merged) == 3
+    assert merged[0]["role"] == "user"
+    assert merged[2]["role"] == "tool"
+    a = cast(ChatCompletionAssistantMessageParam, merged[1])
+    assert a["role"] == "assistant"
+    assert a.get("content") == "Let me build that"
+    assert a.get("tool_calls") == [_tc]
+
+
+def test_merge_combines_tool_calls_from_both():
+    """Both consecutive assistants have tool_calls — they get merged."""
+    msgs: list[ChatCompletionAssistantMessageParam] = [
+        ChatCompletionAssistantMessageParam(
+            role="assistant", content="text", tool_calls=[_tc]
+        ),
+        ChatCompletionAssistantMessageParam(
+            role="assistant", content="", tool_calls=[_tc2]
+        ),
+    ]
+    merged = ChatSession._merge_consecutive_assistant_messages(msgs)  # type: ignore[arg-type]
+
+    assert len(merged) == 1
+    a = cast(ChatCompletionAssistantMessageParam, merged[0])
+    assert a.get("tool_calls") == [_tc, _tc2]
+    assert a.get("content") == "text"
+
+
+def test_merge_three_consecutive_assistants():
+    """Three consecutive assistants collapse into one."""
+    msgs: list[ChatCompletionAssistantMessageParam] = [
+        ChatCompletionAssistantMessageParam(role="assistant", content="a"),
+        ChatCompletionAssistantMessageParam(role="assistant", content="b"),
+        ChatCompletionAssistantMessageParam(
+            role="assistant", content="", tool_calls=[_tc]
+        ),
+    ]
+    merged = ChatSession._merge_consecutive_assistant_messages(msgs)  # type: ignore[arg-type]
+
+    assert len(merged) == 1
+    a = cast(ChatCompletionAssistantMessageParam, merged[0])
+    assert a.get("content") == "a\nb"
+    assert a.get("tool_calls") == [_tc]
+
+
+def test_merge_empty_and_single_message():
+    """Edge cases: empty list and single message."""
+    assert ChatSession._merge_consecutive_assistant_messages([]) == []
+
+    single: list[ChatCompletionMessageParam] = [
+        ChatCompletionUserMessageParam(role="user", content="hi")
+    ]
+    assert ChatSession._merge_consecutive_assistant_messages(single) == single
+
+
+# --------------------------------------------------------------------------- #
+#                        add_tool_call_to_current_turn                         #
+# --------------------------------------------------------------------------- #
+
+_raw_tc = {
+    "id": "tc1",
+    "type": "function",
+    "function": {"name": "f", "arguments": "{}"},
+}
+_raw_tc2 = {
+    "id": "tc2",
+    "type": "function",
+    "function": {"name": "g", "arguments": "{}"},
+}
+
+
+def test_add_tool_call_appends_to_existing_assistant():
+    """When the last assistant is from the current turn, tool_call is added to it."""
+    session = ChatSession.new(user_id="u")
+    session.messages = [
+        ChatMessage(role="user", content="hi"),
+        ChatMessage(role="assistant", content="working on it"),
+    ]
+    session.add_tool_call_to_current_turn(_raw_tc)
+
+    assert len(session.messages) == 2  # no new message created
+    assert session.messages[1].tool_calls == [_raw_tc]
+
+
+def test_add_tool_call_creates_assistant_when_none_exists():
+    """When there's no current-turn assistant, a new one is created."""
+    session = ChatSession.new(user_id="u")
+    session.messages = [
+        ChatMessage(role="user", content="hi"),
+    ]
+    session.add_tool_call_to_current_turn(_raw_tc)
+
+    assert len(session.messages) == 2
+    assert session.messages[1].role == "assistant"
+    assert session.messages[1].tool_calls == [_raw_tc]
+
+
+def test_add_tool_call_does_not_cross_user_boundary():
+    """A user message acts as a boundary — previous assistant is not modified."""
+    session = ChatSession.new(user_id="u")
+    session.messages = [
+        ChatMessage(role="assistant", content="old turn"),
+        ChatMessage(role="user", content="new message"),
+    ]
+    session.add_tool_call_to_current_turn(_raw_tc)
+
+    assert len(session.messages) == 3  # new assistant was created
+    assert session.messages[0].tool_calls is None  # old assistant untouched
+    assert session.messages[2].role == "assistant"
+    assert session.messages[2].tool_calls == [_raw_tc]
+
+
+def test_add_tool_call_multiple_times():
+    """Multiple long-running tool calls accumulate on the same assistant."""
+    session = ChatSession.new(user_id="u")
+    session.messages = [
+        ChatMessage(role="user", content="hi"),
+        ChatMessage(role="assistant", content="doing stuff"),
+    ]
+    session.add_tool_call_to_current_turn(_raw_tc)
+    # Simulate a pending tool result in between (like _yield_tool_call does)
+    session.messages.append(
+        ChatMessage(role="tool", content="pending", tool_call_id="tc1")
+    )
+    session.add_tool_call_to_current_turn(_raw_tc2)
+
+    assert len(session.messages) == 3  # user, assistant, tool — no extra assistant
+    assert session.messages[1].tool_calls == [_raw_tc, _raw_tc2]
+
+
+def test_to_openai_messages_merges_split_assistants():
+    """End-to-end: session with split assistants produces valid OpenAI messages."""
+    session = ChatSession.new(user_id="u")
+    session.messages = [
+        ChatMessage(role="user", content="build agent"),
+        ChatMessage(role="assistant", content="Let me build that"),
+        ChatMessage(
+            role="assistant",
+            content="",
+            tool_calls=[
+                {
+                    "id": "tc1",
+                    "type": "function",
+                    "function": {"name": "create_agent", "arguments": "{}"},
+                }
+            ],
+        ),
+        ChatMessage(role="tool", content="done", tool_call_id="tc1"),
+        ChatMessage(role="assistant", content="Saved!"),
+        ChatMessage(role="user", content="show me an example run"),
+    ]
+    openai_msgs = session.to_openai_messages()
+
+    # The two consecutive assistants at index 1,2 should be merged
+    roles = [m["role"] for m in openai_msgs]
+    assert roles == ["user", "assistant", "tool", "assistant", "user"]
+
+    # The merged assistant should have both content and tool_calls
+    merged = cast(ChatCompletionAssistantMessageParam, openai_msgs[1])
+    assert merged.get("content") == "Let me build that"
+    tc_list = merged.get("tool_calls")
+    assert tc_list is not None and len(list(tc_list)) == 1
+    assert list(tc_list)[0]["id"] == "tc1"
@@ -10,6 +10,8 @@ from typing import Any
 
 from pydantic import BaseModel, Field
 
+from backend.util.json import dumps as json_dumps
+
 
 class ResponseType(str, Enum):
     """Types of streaming responses following AI SDK protocol."""

@@ -193,6 +195,18 @@ class StreamError(StreamBaseResponse):
         default=None, description="Additional error details"
     )
 
+    def to_sse(self) -> str:
+        """Convert to SSE format, only emitting fields required by AI SDK protocol.
+
+        The AI SDK uses z.strictObject({type, errorText}) which rejects
+        any extra fields like `code` or `details`.
+        """
+        data = {
+            "type": self.type.value,
+            "errorText": self.errorText,
+        }
+        return f"data: {json_dumps(data)}\n\n"
+
 
 class StreamHeartbeat(StreamBaseResponse):
     """Heartbeat to keep SSE connection alive during long-running operations.
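On the wire, this override produces a single SSE frame whose JSON payload carries only the two whitelisted fields; an illustrative frame (error text invented for the example):

data: {"type": "error", "errorText": "Rate limit exceeded"}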
@@ -80,19 +80,6 @@ settings = Settings()
 client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
 
 
-def _apply_thinking_config(extra_body: dict[str, Any], model: str) -> None:
-    """Apply extended thinking configuration for Anthropic models via OpenRouter.
-
-    OpenRouter's reasoning API expects either:
-    - {"max_tokens": N} for explicit token budget
-    - {"effort": "high"} for automatic budget
-
-    See: https://openrouter.ai/docs/reasoning-tokens
-    """
-    if config.thinking_enabled and "anthropic" in model.lower():
-        extra_body["reasoning"] = {"max_tokens": config.thinking_budget_tokens}
-
-
 langfuse = get_client()
 
 # Redis key prefix for tracking running long-running operations
@@ -813,9 +800,13 @@ async def stream_chat_completion(
     # Build the messages list in the correct order
     messages_to_save: list[ChatMessage] = []
 
-    # Add assistant message with tool_calls if any
+    # Add assistant message with tool_calls if any.
+    # Use extend (not assign) to preserve tool_calls already added by
+    # _yield_tool_call for long-running tools.
     if accumulated_tool_calls:
-        assistant_response.tool_calls = accumulated_tool_calls
+        if not assistant_response.tool_calls:
+            assistant_response.tool_calls = []
+        assistant_response.tool_calls.extend(accumulated_tool_calls)
         logger.info(
             f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
         )
@@ -1079,8 +1070,9 @@ async def _stream_chat_chunks(
             :128
         ]  # OpenRouter limit
 
-    # Enable extended thinking for Anthropic models via OpenRouter
-    _apply_thinking_config(extra_body, model)
+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in model.lower():
+        extra_body["reasoning"] = {"enabled": True}
 
     api_call_start = time_module.perf_counter()
     stream = await client.chat.completions.create(
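Together with the helper deleted above, the three call sites in this file now inline the same two-line check. The behavioral change, going by the deleted docstring's description of OpenRouter's reasoning API (values illustrative):

extra_body["reasoning"] = {"max_tokens": config.thinking_budget_tokens}  # before: fixed token budget
extra_body["reasoning"] = {"enabled": True}                              # after: OpenRouter picks the budget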
@@ -1416,13 +1408,9 @@ async def _yield_tool_call(
         operation_id=operation_id,
     )
 
-    # Save assistant message with tool_call FIRST (required by LLM)
-    assistant_message = ChatMessage(
-        role="assistant",
-        content="",
-        tool_calls=[tool_calls[yield_idx]],
-    )
-    session.messages.append(assistant_message)
+    # Attach the tool_call to the current turn's assistant message
+    # (or create one if this is a tool-only response with no text).
+    session.add_tool_call_to_current_turn(tool_calls[yield_idx])
 
     # Then save pending tool result
     pending_message = ChatMessage(
@@ -1845,8 +1833,9 @@ async def _generate_llm_continuation(
     if session_id:
         extra_body["session_id"] = session_id[:128]
 
-    # Enable extended thinking for Anthropic models via OpenRouter
-    _apply_thinking_config(extra_body, config.model)
+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in config.model.lower():
+        extra_body["reasoning"] = {"enabled": True}
 
     retry_count = 0
     last_error: Exception | None = None
@@ -1978,8 +1967,9 @@ async def _generate_llm_continuation_with_streaming(
     if session_id:
         extra_body["session_id"] = session_id[:128]
 
-    # Enable extended thinking for Anthropic models via OpenRouter
-    _apply_thinking_config(extra_body, config.model)
+    # Enable adaptive thinking for Anthropic models via OpenRouter
+    if config.thinking_enabled and "anthropic" in config.model.lower():
+        extra_body["reasoning"] = {"enabled": True}
 
     # Make streaming LLM call (no tools - just text response)
     from typing import cast
@@ -21,43 +21,71 @@ logger = logging.getLogger(__name__)
 
 class HumanInTheLoopBlock(Block):
     """
-    This block pauses execution and waits for human approval or modification of the data.
+    Pauses execution and waits for human approval or rejection of the data.
 
-    When executed, it creates a pending review entry and sets the node execution status
-    to REVIEW. The execution will remain paused until a human user either:
-    - Approves the data (with or without modifications)
-    - Rejects the data
+    When executed, this block creates a pending review entry and sets the node execution
+    status to REVIEW. The execution remains paused until a human user either approves
+    or rejects the data.
 
-    This is useful for workflows that require human validation or intervention before
-    proceeding to the next steps.
+    **How it works:**
+    - The input data is presented to a human reviewer
+    - The reviewer can approve or reject (and optionally modify the data if editable)
+    - On approval: the data flows out through the `approved_data` output pin
+    - On rejection: the data flows out through the `rejected_data` output pin
+
+    **Important:** The output pins yield the actual data itself, NOT status strings.
+    The approval/rejection decision determines WHICH output pin fires, not the value.
+    You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
+    downstream blocks to the appropriate output pin for each case.
+
+    **Example usage:**
+    - Connect `approved_data` → next step in your workflow (data was approved)
+    - Connect `rejected_data` → error handling or notification (data was rejected)
     """
 
     class Input(BlockSchemaInput):
-        data: Any = SchemaField(description="The data to be reviewed by a human user")
+        data: Any = SchemaField(
+            description="The data to be reviewed by a human user. "
+            "This exact data will be passed through to either approved_data or "
+            "rejected_data output based on the reviewer's decision."
+        )
         name: str = SchemaField(
-            description="A descriptive name for what this data represents",
+            description="A descriptive name for what this data represents. "
+            "This helps the reviewer understand what they are reviewing.",
         )
         editable: bool = SchemaField(
-            description="Whether the human reviewer can edit the data",
+            description="Whether the human reviewer can edit the data before "
+            "approving or rejecting it",
             default=True,
             advanced=True,
         )
 
     class Output(BlockSchemaOutput):
         approved_data: Any = SchemaField(
-            description="The data when approved (may be modified by reviewer)"
+            description="Outputs the input data when the reviewer APPROVES it. "
+            "The value is the actual data itself (not a status string like 'APPROVED'). "
+            "If the reviewer edited the data, this contains the modified version. "
+            "Connect downstream blocks here for the 'approved' workflow path."
         )
         rejected_data: Any = SchemaField(
-            description="The data when rejected (may be modified by reviewer)"
+            description="Outputs the input data when the reviewer REJECTS it. "
+            "The value is the actual data itself (not a status string like 'REJECTED'). "
+            "If the reviewer edited the data, this contains the modified version. "
+            "Connect downstream blocks here for the 'rejected' workflow path."
        )
         review_message: str = SchemaField(
-            description="Any message provided by the reviewer", default=""
+            description="Optional message provided by the reviewer explaining their "
+            "decision. Only outputs when the reviewer provides a message; "
+            "this pin does not fire if no message was given.",
+            default="",
         )
 
     def __init__(self):
         super().__init__(
             id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
-            description="Pause execution and wait for human approval or modification of data",
+            description="Pause execution for human review. Data flows through "
+            "approved_data or rejected_data output based on the reviewer's decision. "
+            "Outputs contain the actual data, not status strings.",
             categories={BlockCategory.BASIC},
             input_schema=HumanInTheLoopBlock.Input,
             output_schema=HumanInTheLoopBlock.Output,
@@ -743,6 +743,11 @@ class GraphModel(Graph, GraphMeta):
             # For invalid blocks, we still raise immediately as this is a structural issue
             raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")
 
+        if block.disabled:
+            raise ValueError(
+                f"Block {node.block_id} is disabled and cannot be used in graphs"
+            )
+
         node_input_mask = (
             nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
         )
@@ -213,6 +213,9 @@ async def execute_node(
         block_name=node_block.name,
     )
 
+    if node_block.disabled:
+        raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")
+
     # Sanity check: validate the execution input.
     input_data, error = validate_exec(node, data.inputs, resolve_input=False)
     if input_data is None:
@@ -364,6 +364,44 @@ def _remove_orphan_tool_responses(
     return result
 
 
+def validate_and_remove_orphan_tool_responses(
+    messages: list[dict],
+    log_warning: bool = True,
+) -> list[dict]:
+    """
+    Validate tool_call/tool_response pairs and remove orphaned responses.
+
+    Scans messages in order, tracking all tool_call IDs. Any tool response
+    referencing an ID not seen in a preceding message is considered orphaned
+    and removed. This prevents API errors like Anthropic's "unexpected tool_use_id".
+
+    Args:
+        messages: List of messages to validate (OpenAI or Anthropic format)
+        log_warning: Whether to log a warning when orphans are found
+
+    Returns:
+        A new list with orphaned tool responses removed
+    """
+    available_ids: set[str] = set()
+    orphan_ids: set[str] = set()
+
+    for msg in messages:
+        available_ids |= _extract_tool_call_ids_from_message(msg)
+        for resp_id in _extract_tool_response_ids_from_message(msg):
+            if resp_id not in available_ids:
+                orphan_ids.add(resp_id)
+
+    if not orphan_ids:
+        return messages
+
+    if log_warning:
+        logger.warning(
+            f"Removing {len(orphan_ids)} orphan tool response(s): {orphan_ids}"
+        )
+
+    return _remove_orphan_tool_responses(messages, orphan_ids)
+
+
 def _ensure_tool_pairs_intact(
     recent_messages: list[dict],
     all_messages: list[dict],
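A minimal illustration of the guarantee the new helper adds, assuming OpenAI-format messages where the private extractors read `tool_calls[].id` and `tool_call_id` (dicts abbreviated for the example):

msgs = [
    {"role": "assistant", "content": "", "tool_calls": [{"id": "tc1"}]},
    {"role": "tool", "tool_call_id": "tc1", "content": "ok"},     # paired: kept
    {"role": "tool", "tool_call_id": "tc9", "content": "stale"},  # orphan: removed
]
cleaned = validate_and_remove_orphan_tool_responses(msgs)  # logs a warning, drops the tc9 response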
@@ -723,6 +761,13 @@ async def compress_context(
 
     # Filter out any None values that may have been introduced
     final_msgs: list[dict] = [m for m in msgs if m is not None]
+
+    # ---- STEP 6: Final tool-pair validation ---------------------------------
+    # After all compression steps, verify that every tool response has a
+    # matching tool_call in a preceding assistant message. Remove orphans
+    # to prevent API errors (e.g., Anthropic's "unexpected tool_use_id").
+    final_msgs = validate_and_remove_orphan_tool_responses(final_msgs)
+
     final_count = sum(_msg_tokens(m, enc) for m in final_msgs)
     error = None
     if final_count + reserve > target_tokens:
autogpt_platform/backend/poetry.lock (generated, 10 changes)

@@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5"
 
 [[package]]
 name = "aiofiles"
-version = "24.1.0"
+version = "25.1.0"
 description = "File support for asyncio."
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
-    {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
+    {file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"},
+    {file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"},
 ]
 
 [[package]]
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "fc135114e01de39c8adf70f6132045e7d44a19473c1279aee0978de65aad1655"
+content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
@@ -76,7 +76,7 @@ yt-dlp = "2025.12.08"
 zerobouncesdk = "^1.1.2"
 # NOTE: please insert new dependencies in their alphabetical location
 pytest-snapshot = "^0.9.0"
-aiofiles = "^24.1.0"
+aiofiles = "^25.1.0"
 tiktoken = "^0.12.0"
 aioclamd = "^1.0.0"
 setuptools = "^80.9.0"
@@ -1,11 +1,11 @@
 "use client";
 
-import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
 import { SidebarProvider } from "@/components/ui/sidebar";
 import { ChatContainer } from "./components/ChatContainer/ChatContainer";
 import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
 import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
 import { MobileHeader } from "./components/MobileHeader/MobileHeader";
+import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
 import { useCopilotPage } from "./useCopilotPage";
 
 export function CopilotPage() {
@@ -34,7 +34,11 @@ export function CopilotPage() {
   } = useCopilotPage();
 
   if (isUserLoading || !isLoggedIn) {
-    return <LoadingSpinner size="large" cover />;
+    return (
+      <div className="fixed inset-0 z-50 flex items-center justify-center bg-[#f8f8f9]">
+        <ScaleLoader className="text-neutral-400" />
+      </div>
+    );
   }
 
   return (
@@ -10,8 +10,9 @@ import {
   MessageResponse,
 } from "@/components/ai-elements/message";
 import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
+import { toast } from "@/components/molecules/Toast/use-toast";
 import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
-import { useEffect, useState } from "react";
+import { useEffect, useRef, useState } from "react";
 import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
 import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
 import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
@@ -121,6 +122,7 @@ export const ChatMessagesContainer = ({
   isLoading,
 }: ChatMessagesContainerProps) => {
   const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
+  const lastToastTimeRef = useRef(0);
 
   useEffect(() => {
     if (status === "submitted") {
@@ -128,6 +130,20 @@ export const ChatMessagesContainer = ({
     }
   }, [status]);
 
+  // Show a toast when a new error occurs, debounced to avoid spam
+  useEffect(() => {
+    if (!error) return;
+    const now = Date.now();
+    if (now - lastToastTimeRef.current < 3_000) return;
+    lastToastTimeRef.current = now;
+    toast({
+      variant: "destructive",
+      title: "Something went wrong",
+      description:
+        "The assistant encountered an error. Please try sending your message again.",
+    });
+  }, [error]);
+
   const lastMessage = messages[messages.length - 1];
   const lastAssistantHasVisibleContent =
     lastMessage?.role === "assistant" &&
@@ -143,10 +159,10 @@ export const ChatMessagesContainer = ({
 
   return (
     <Conversation className="min-h-0 flex-1">
-      <ConversationContent className="gap-6 px-3 py-6">
+      <ConversationContent className="flex min-h-screen flex-1 flex-col gap-6 px-3 py-6">
         {isLoading && messages.length === 0 && (
-          <div className="flex flex-1 items-center justify-center">
-            <LoadingSpinner size="large" className="text-neutral-400" />
+          <div className="flex min-h-full flex-1 items-center justify-center">
+            <LoadingSpinner className="text-neutral-600" />
           </div>
         )}
         {messages.map((message, messageIndex) => {
@@ -263,8 +279,12 @@ export const ChatMessagesContainer = ({
           </Message>
         )}
         {error && (
-          <div className="rounded-lg bg-red-50 p-3 text-red-600">
-            Error: {error.message}
+          <div className="rounded-lg bg-red-50 p-4 text-sm text-red-700">
+            <p className="font-medium">Something went wrong</p>
+            <p className="mt-1 text-red-600">
+              The assistant encountered an error. Please try sending your
+              message again.
+            </p>
           </div>
         )}
       </ConversationContent>
@@ -121,8 +121,8 @@ export function ChatSidebar() {
         className="mt-4 flex flex-col gap-1"
       >
         {isLoadingSessions ? (
-          <div className="flex items-center justify-center py-4">
-            <LoadingSpinner size="small" className="text-neutral-400" />
+          <div className="flex min-h-[30rem] items-center justify-center py-4">
+            <LoadingSpinner size="small" className="text-neutral-600" />
           </div>
         ) : sessions.length === 0 ? (
           <p className="py-4 text-center text-sm text-neutral-500">
@@ -0,0 +1,35 @@
+.loader {
+  width: 48px;
+  height: 48px;
+  display: inline-block;
+  position: relative;
+}
+
+.loader::after,
+.loader::before {
+  content: "";
+  box-sizing: border-box;
+  width: 100%;
+  height: 100%;
+  border-radius: 50%;
+  background: currentColor;
+  position: absolute;
+  left: 0;
+  top: 0;
+  animation: animloader 2s linear infinite;
+}
+
+.loader::after {
+  animation-delay: 1s;
+}
+
+@keyframes animloader {
+  0% {
+    transform: scale(0);
+    opacity: 1;
+  }
+  100% {
+    transform: scale(1);
+    opacity: 0;
+  }
+}
@@ -0,0 +1,16 @@
+import { cn } from "@/lib/utils";
+import styles from "./ScaleLoader.module.css";
+
+interface Props {
+  size?: number;
+  className?: string;
+}
+
+export function ScaleLoader({ size = 48, className }: Props) {
+  return (
+    <div
+      className={cn(styles.loader, className)}
+      style={{ width: size, height: size }}
+    />
+  );
+}
@@ -30,7 +30,7 @@ export function ContentCard({
   return (
     <div
       className={cn(
-        "rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
+        "min-w-0 rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
         className,
       )}
     >
@@ -4,7 +4,6 @@ import { WarningDiamondIcon } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
 import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
-import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
 import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
 import {
   ContentCardDescription,
@@ -49,12 +48,7 @@ interface Props {
   part: CreateAgentToolPart;
 }
 
-function getAccordionMeta(output: CreateAgentToolOutput): {
-  icon: React.ReactNode;
-  title: React.ReactNode;
-  titleClassName?: string;
-  description?: string;
-} {
+function getAccordionMeta(output: CreateAgentToolOutput) {
   const icon = <AccordionIcon />;
 
   if (isAgentSavedOutput(output)) {
@@ -73,6 +67,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
       icon,
       title: "Needs clarification",
       description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
+      expanded: true,
     };
   }
   if (
@@ -81,7 +76,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
     isOperationInProgressOutput(output)
   ) {
     return {
-      icon: <OrbitLoader size={32} />,
+      icon,
       title: "Creating agent, this may take a few minutes. Sit back and relax.",
     };
   }
@@ -97,18 +92,23 @@ function getAccordionMeta(output: CreateAgentToolOutput): {
 export function CreateAgentTool({ part }: Props) {
   const text = getAnimationText(part);
   const { onSend } = useCopilotChatActions();
 
   const isStreaming =
     part.state === "input-streaming" || part.state === "input-available";
+
   const output = getCreateAgentToolOutput(part);
+
   const isError =
     part.state === "output-error" || (!!output && isErrorOutput(output));
+
   const isOperating =
     !!output &&
     (isOperationStartedOutput(output) ||
       isOperationPendingOutput(output) ||
       isOperationInProgressOutput(output));
+
   const progress = useAsymptoticProgress(isOperating);
+
   const hasExpandableContent =
     part.state === "output-available" &&
     !!output &&
@@ -149,10 +149,7 @@ export function CreateAgentTool({ part }: Props) {
       </div>
 
       {hasExpandableContent && output && (
-        <ToolAccordion
-          {...getAccordionMeta(output)}
-          defaultExpanded={isOperating || isClarificationNeededOutput(output)}
-        >
+        <ToolAccordion {...getAccordionMeta(output)}>
           {isOperating && (
             <ContentGrid>
               <ProgressBar value={progress} className="max-w-[280px]" />
@@ -146,10 +146,7 @@ export function EditAgentTool({ part }: Props) {
       </div>
 
       {hasExpandableContent && output && (
-        <ToolAccordion
-          {...getAccordionMeta(output)}
-          defaultExpanded={isOperating || isClarificationNeededOutput(output)}
-        >
+        <ToolAccordion {...getAccordionMeta(output)}>
           {isOperating && (
             <ContentGrid>
               <ProgressBar value={progress} className="max-w-[280px]" />
@@ -61,14 +61,7 @@ export function RunAgentTool({ part }: Props) {
       </div>
 
       {hasExpandableContent && output && (
-        <ToolAccordion
-          {...getAccordionMeta(output)}
-          defaultExpanded={
-            isRunAgentExecutionStartedOutput(output) ||
-            isRunAgentSetupRequirementsOutput(output) ||
-            isRunAgentAgentDetailsOutput(output)
-          }
-        >
+        <ToolAccordion {...getAccordionMeta(output)}>
           {isRunAgentExecutionStartedOutput(output) && (
             <ExecutionStartedCard output={output} />
           )}
@@ -10,7 +10,7 @@ import {
   WarningDiamondIcon,
 } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
-import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
+import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
 
 export interface RunAgentInput {
   username_agent_slug?: string;
@@ -171,7 +171,7 @@ export function ToolIcon({
     );
   }
   if (isStreaming) {
-    return <SpinnerLoader size={40} className="text-neutral-700" />;
+    return <OrbitLoader size={24} />;
   }
   return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
 }
@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
       ? output.status.trim()
       : "started";
     return {
-      icon: <SpinnerLoader size={28} className="text-neutral-700" />,
+      icon,
       title: output.graph_name,
       description: `Status: ${statusText}`,
     };
@@ -55,13 +55,7 @@ export function RunBlockTool({ part }: Props) {
       </div>
 
       {hasExpandableContent && output && (
-        <ToolAccordion
-          {...getAccordionMeta(output)}
-          defaultExpanded={
-            isRunBlockBlockOutput(output) ||
-            isRunBlockSetupRequirementsOutput(output)
-          }
-        >
+        <ToolAccordion {...getAccordionMeta(output)}>
           {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
 
           {isRunBlockSetupRequirementsOutput(output) && (
@@ -8,7 +8,7 @@ import {
   WarningDiamondIcon,
 } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
-import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
+import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
 
 export interface RunBlockInput {
   block_id?: string;
@@ -120,7 +120,7 @@ export function ToolIcon({
     );
   }
   if (isStreaming) {
-    return <SpinnerLoader size={40} className="text-neutral-700" />;
+    return <OrbitLoader size={24} />;
   }
   return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
 }
@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
   if (isRunBlockBlockOutput(output)) {
     const keys = Object.keys(output.outputs ?? {});
     return {
-      icon: <SpinnerLoader size={32} className="text-neutral-700" />,
+      icon,
       title: output.block_name,
       description:
         keys.length > 0
@@ -3,7 +3,6 @@ import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
 import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
 import { useChat } from "@ai-sdk/react";
 import { DefaultChatTransport } from "ai";
-import { useRouter } from "next/navigation";
 import { useEffect, useMemo, useState } from "react";
 import { useChatSession } from "./useChatSession";
 
@@ -11,7 +10,6 @@ export function useCopilotPage() {
   const { isUserLoading, isLoggedIn } = useSupabase();
   const [isDrawerOpen, setIsDrawerOpen] = useState(false);
   const [pendingMessage, setPendingMessage] = useState<string | null>(null);
-  const router = useRouter();
 
   const {
     sessionId,
@@ -54,10 +52,6 @@ export function useCopilotPage() {
     transport: transport ?? undefined,
   });
 
-  useEffect(() => {
-    if (!isUserLoading && !isLoggedIn) router.replace("/login");
-  }, [isUserLoading, isLoggedIn]);
-
   useEffect(() => {
     if (!hydratedMessages || hydratedMessages.length === 0) return;
     setMessages((prev) => {
@@ -1,11 +1,8 @@
 import { environment } from "@/services/environment";
 import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
 import { NextRequest } from "next/server";
+import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
 
-/**
- * SSE Proxy for chat streaming.
- * Supports POST with context (page content + URL) in the request body.
- */
 export async function POST(
   request: NextRequest,
   { params }: { params: Promise<{ sessionId: string }> },
@@ -23,17 +20,14 @@ export async function POST(
     );
   }
 
-  // Get auth token from server-side session
   const token = await getServerAuthToken();
 
-  // Build backend URL
   const backendUrl = environment.getAGPTServerBaseUrl();
   const streamUrl = new URL(
     `/api/chat/sessions/${sessionId}/stream`,
     backendUrl,
   );
 
-  // Forward request to backend with auth header
   const headers: Record<string, string> = {
     "Content-Type": "application/json",
     Accept: "text/event-stream",
@@ -63,14 +57,15 @@ export async function POST(
     });
   }
 
-  // Return the SSE stream directly
-  return new Response(response.body, {
-    headers: {
-      "Content-Type": "text/event-stream",
-      "Cache-Control": "no-cache, no-transform",
-      Connection: "keep-alive",
-      "X-Accel-Buffering": "no",
-    },
+  if (!response.body) {
+    return new Response(
+      JSON.stringify({ error: "Empty response from chat service" }),
+      { status: 502, headers: { "Content-Type": "application/json" } },
+    );
+  }
+
+  return new Response(normalizeSSEStream(response.body), {
+    headers: SSE_HEADERS,
   });
 } catch (error) {
   console.error("SSE proxy error:", error);
@@ -87,13 +82,6 @@ export async function POST(
   }
 }
 
-/**
- * Resume an active stream for a session.
- *
- * Called by the AI SDK's `useChat(resume: true)` on page load.
- * Proxies to the backend which checks for an active stream and either
- * replays it (200 + SSE) or returns 204 No Content.
- */
 export async function GET(
   _request: NextRequest,
   { params }: { params: Promise<{ sessionId: string }> },
@@ -124,7 +112,6 @@ export async function GET(
     headers,
   });
 
-  // 204 = no active stream to resume
   if (response.status === 204) {
     return new Response(null, { status: 204 });
   }
@@ -137,12 +124,13 @@ export async function GET(
     });
   }
 
-  return new Response(response.body, {
+  if (!response.body) {
+    return new Response(null, { status: 204 });
+  }
+
+  return new Response(normalizeSSEStream(response.body), {
     headers: {
-      "Content-Type": "text/event-stream",
-      "Cache-Control": "no-cache, no-transform",
-      Connection: "keep-alive",
-      "X-Accel-Buffering": "no",
+      ...SSE_HEADERS,
       "x-vercel-ai-ui-message-stream": "v1",
     },
   });
autogpt_platform/frontend/src/app/api/chat/sse-helpers.ts (new file, 72 lines)

@@ -0,0 +1,72 @@
+export const SSE_HEADERS = {
+  "Content-Type": "text/event-stream",
+  "Cache-Control": "no-cache, no-transform",
+  Connection: "keep-alive",
+  "X-Accel-Buffering": "no",
+} as const;
+
+export function normalizeSSEStream(
+  input: ReadableStream<Uint8Array>,
+): ReadableStream<Uint8Array> {
+  const decoder = new TextDecoder();
+  const encoder = new TextEncoder();
+  let buffer = "";
+
+  return input.pipeThrough(
+    new TransformStream<Uint8Array, Uint8Array>({
+      transform(chunk, controller) {
+        buffer += decoder.decode(chunk, { stream: true });
+
+        const parts = buffer.split("\n\n");
+        buffer = parts.pop() ?? "";
+
+        for (const part of parts) {
+          const normalized = normalizeSSEEvent(part);
+          controller.enqueue(encoder.encode(normalized + "\n\n"));
+        }
+      },
+      flush(controller) {
+        if (buffer.trim()) {
+          const normalized = normalizeSSEEvent(buffer);
+          controller.enqueue(encoder.encode(normalized + "\n\n"));
+        }
+      },
+    }),
+  );
+}
+
+function normalizeSSEEvent(event: string): string {
+  const lines = event.split("\n");
+  const dataLines: string[] = [];
+  const otherLines: string[] = [];
+
+  for (const line of lines) {
+    if (line.startsWith("data: ")) {
+      dataLines.push(line.slice(6));
+    } else {
+      otherLines.push(line);
+    }
+  }
+
+  if (dataLines.length === 0) return event;
+
+  const dataStr = dataLines.join("\n");
+  try {
+    const parsed = JSON.parse(dataStr) as Record<string, unknown>;
+    if (parsed.type === "error") {
+      const normalized = {
+        type: "error",
+        errorText:
+          typeof parsed.errorText === "string"
+            ? parsed.errorText
+            : "An unexpected error occurred",
+      };
+      const newData = `data: ${JSON.stringify(normalized)}`;
+      return [...otherLines.filter((l) => l.length > 0), newData].join("\n");
+    }
+  } catch {
+    // Not valid JSON — pass through as-is
+  }
+
+  return event;
+}
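Concretely, `normalizeSSEStream` re-frames backend error events so the AI SDK's strict error schema accepts them, and passes every other event through untouched. Illustrative frames (payload values invented for the example):

before:  data: {"type": "error", "errorText": "boom", "code": 500}
after:   data: {"type": "error", "errorText": "boom"}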
@@ -1,20 +1,8 @@
 import { environment } from "@/services/environment";
 import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
 import { NextRequest } from "next/server";
+import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
 
-/**
- * SSE Proxy for task stream reconnection.
- *
- * This endpoint allows clients to reconnect to an ongoing or recently completed
- * background task's stream. It replays missed messages from Redis Streams and
- * subscribes to live updates if the task is still running.
- *
- * Client contract:
- * 1. When receiving an operation_started event, store the task_id
- * 2. To reconnect: GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
- * 3. Messages are replayed from the last_message_id position
- * 4. Stream ends when "finish" event is received
- */
 export async function GET(
   request: NextRequest,
   { params }: { params: Promise<{ taskId: string }> },
@@ -24,15 +12,12 @@ export async function GET(
   const lastMessageId = searchParams.get("last_message_id") || "0-0";
 
   try {
-    // Get auth token from server-side session
     const token = await getServerAuthToken();
 
-    // Build backend URL
     const backendUrl = environment.getAGPTServerBaseUrl();
     const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl);
     streamUrl.searchParams.set("last_message_id", lastMessageId);
 
-    // Forward request to backend with auth header
     const headers: Record<string, string> = {
       Accept: "text/event-stream",
       "Cache-Control": "no-cache",
@@ -56,14 +41,12 @@ export async function GET(
     });
   }
 
-    // Return the SSE stream directly
-    return new Response(response.body, {
-      headers: {
-        "Content-Type": "text/event-stream",
-        "Cache-Control": "no-cache, no-transform",
-        Connection: "keep-alive",
-        "X-Accel-Buffering": "no",
-      },
-    });
+    if (!response.body) {
+      return new Response(null, { status: 204 });
+    }
+
+    return new Response(normalizeSSEStream(response.body), {
+      headers: SSE_HEADERS,
+    });
   } catch (error) {
     console.error("Task stream proxy error:", error);
@@ -6,6 +6,7 @@ import { SupabaseClient } from "@supabase/supabase-js";
 export const PROTECTED_PAGES = [
   "/auth/authorize",
   "/auth/integrations",
+  "/copilot",
   "/monitor",
   "/build",
   "/onboarding",
@@ -61,7 +61,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
 | [Get List Item](block-integrations/basic.md#get-list-item) | Returns the element at the given index |
 | [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
 | [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
-| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
+| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution for human review |
 | [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
 | [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
 | [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
@@ -975,7 +975,7 @@ A travel planning application could use this block to provide users with current
 ## Human In The Loop
 
 ### What it is
-Pause execution and wait for human approval or modification of data
+Pause execution for human review. Data flows through approved_data or rejected_data output based on the reviewer's decision. Outputs contain the actual data, not status strings.
 
 ### How it works
 <!-- MANUAL: how_it_works -->
@@ -988,18 +988,18 @@ This enables human oversight at critical points in automated workflows, ensuring
 
 | Input | Description | Type | Required |
 |-------|-------------|------|----------|
-| data | The data to be reviewed by a human user | Data | Yes |
-| name | A descriptive name for what this data represents | str | Yes |
-| editable | Whether the human reviewer can edit the data | bool | No |
+| data | The data to be reviewed by a human user. This exact data will be passed through to either approved_data or rejected_data output based on the reviewer's decision. | Data | Yes |
+| name | A descriptive name for what this data represents. This helps the reviewer understand what they are reviewing. | str | Yes |
+| editable | Whether the human reviewer can edit the data before approving or rejecting it | bool | No |
 
 ### Outputs
 
 | Output | Description | Type |
 |--------|-------------|------|
 | error | Error message if the operation failed | str |
-| approved_data | The data when approved (may be modified by reviewer) | Approved Data |
-| rejected_data | The data when rejected (may be modified by reviewer) | Rejected Data |
-| review_message | Any message provided by the reviewer | str |
+| approved_data | Outputs the input data when the reviewer APPROVES it. The value is the actual data itself (not a status string like 'APPROVED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'approved' workflow path. | Approved Data |
+| rejected_data | Outputs the input data when the reviewer REJECTS it. The value is the actual data itself (not a status string like 'REJECTED'). If the reviewer edited the data, this contains the modified version. Connect downstream blocks here for the 'rejected' workflow path. | Rejected Data |
+| review_message | Optional message provided by the reviewer explaining their decision. Only outputs when the reviewer provides a message; this pin does not fire if no message was given. | str |
 
 ### Possible use case
 <!-- MANUAL: use_case -->