Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-08 13:55:06 -05:00)

Compare commits: fix/execut… → copilot/fi… (8 commits)

Commits (SHA1): e45ef6927f, df3ba7ab47, 3d1e9eee0d, 622f46a65e, 261deee048, 353756019c, 6d52e89466, fcc7c8e931

126  .github/workflows/platform-frontend-ci.yml  (vendored)

@@ -233,3 +233,129 @@ jobs:

```yaml
      - name: Print Final Docker Compose logs
        if: always()
        run: docker compose -f ../docker-compose.yml logs

  lighthouse:
    runs-on: ubuntu-latest
    needs: setup
    # Only run Lighthouse on main branches to avoid excessive resource usage
    if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev' || github.event_name == 'pull_request'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Enable corepack
        run: corepack enable

      - name: Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-lighthouse-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
          restore-keys: |
            ${{ runner.os }}-buildx-lighthouse-

      - name: Run docker compose
        run: |
          NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
        env:
          DOCKER_BUILDKIT: 1
          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          if [ -d "/tmp/.buildx-cache-new" ]; then
            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
          fi

      - name: Wait for services to be ready
        run: |
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

      - name: Create E2E test data
        run: |
          echo "Creating E2E test data..."
          # First try to run the script from inside the container
          if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
            echo "✅ Found e2e_test_data.py in container, running it..."
            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
              exit 1
            }
          else
            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
            # Copy the script into the container and run it
            docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
              echo "❌ Failed to copy script to container"
              exit 1
            }
            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
              exit 1
            }
          fi

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --no-frozen-lockfile

      - name: Generate API client code
        run: |
          echo "Generating API client code from OpenAPI spec..."
          pnpm run generate:api:force || {
            echo "⚠️ API generation failed, but continuing with build"
          }

      - name: Build application
        run: NEXT_PUBLIC_PW_TEST=true pnpm build

      - name: Wait for frontend to be ready
        run: |
          echo "Waiting for frontend container to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:3000 2>/dev/null; do sleep 2; done' || {
            echo "Frontend readiness check timeout"
            exit 1
          }

      - name: Run Lighthouse CI
        run: pnpm lighthouse

      - name: Upload Lighthouse reports
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: lighthouse-reports
          path: lhci_reports/

      - name: Print Final Docker Compose logs
        if: always()
        run: docker compose -f ../docker-compose.yml logs
```
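
The readiness gates in this job are plain shell, so they can be replayed locally before an audit. A rough sketch, assuming the same compose stack is up and the frontend is reachable on port 3000 (ports and endpoints taken from the steps above):

```bash
# Replay the CI job's readiness checks, then run the same audit it runs.
# Run from autogpt_platform/frontend/ with the docker compose stack started.
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done'
timeout 60 sh -c 'until curl -f http://localhost:3000 2>/dev/null; do sleep 2; done'
pnpm lighthouse
```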

5  autogpt_platform/frontend/.gitignore  (vendored)

@@ -57,4 +57,7 @@ storybook-static

```
.cursorrules

# Generated API files
src/app/api/__generated__/

# Lighthouse CI reports
lhci_reports/
```

New file (1 line):

@@ -0,0 +1 @@

```json
{ "chromeFlags": " --headless=new" }
```

64  autogpt_platform/frontend/DEVELOPMENT_SETUP.md  (new file)

@@ -0,0 +1,64 @@

# Development Setup for Lighthouse CI

This document outlines the required setup steps to ensure the frontend builds properly with the Lighthouse CI integration.

## Required Steps Before Building

### 1. Start Backend Services

The frontend build requires generated API files that depend on the running backend. Start the services:

```bash
cd autogpt_platform
docker compose --profile local up deps --build --detach
docker compose up rest_server executor websocket_server database_manager scheduler_server notification_server -d
```
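
Before generating the API client (next step), it can help to confirm the REST server is actually answering. A minimal check, assuming it is exposed on port 8006 with the `/health` endpoint used by the CI workflow above:

```bash
# Poll the backend health endpoint for up to ~60s before proceeding.
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' \
  && echo "Backend is up" || echo "Backend not reachable; check docker compose logs"
```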

### 2. Generate API Files

Once the backend is running, generate the TypeScript API client files:

```bash
cd autogpt_platform/frontend
pnpm generate:api:force
```

This creates:

- `src/app/api/__generated__/endpoints/` - API endpoint hooks
- `src/app/api/__generated__/models/` - TypeScript type definitions
- `src/app/api/openapi.json` - OpenAPI specification
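
A quick way to confirm the generation actually produced these outputs (paths as listed above):

```bash
# Run from autogpt_platform/frontend/ after `pnpm generate:api:force`.
ls src/app/api/__generated__/endpoints src/app/api/__generated__/models
test -f src/app/api/openapi.json && echo "OpenAPI spec present"
```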

### 3. Verify Setup

Test that TypeScript compilation works:

```bash
pnpm types
```

## Why This is Needed

The frontend application uses auto-generated TypeScript clients that are created from the backend's OpenAPI specification. These files are:

1. **Not committed to git** (they're in .gitignore)
2. **Generated at build time** in CI environments
3. **Required for TypeScript compilation** to succeed
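
A minimal local pre-build guard based on the points above (assuming the backend is already running; the check on the `__generated__` directory is only a heuristic):

```bash
# Regenerate the API client only if the generated directory is missing,
# then type-check to confirm the build prerequisites are satisfied.
cd autogpt_platform/frontend
if [ ! -d src/app/api/__generated__ ]; then
  echo "Generated API client missing; regenerating..."
  pnpm generate:api:force
fi
pnpm types
```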

## CI Environment

In CI, this process is handled automatically by the `generate:api:force` command in the build pipeline. The Docker services are already running as part of the test environment.

## Common Issues

- **Module not found errors**: Usually indicate that the API files weren't generated
- **Build failures**: Often resolved by ensuring backend services are healthy
- **TypeScript errors**: Check that `pnpm generate:api:force` completed successfully
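
All three issues usually resolve with the same recovery sequence, using only the commands already described in this document:

```bash
# Regenerate the API client from the running backend, then re-run the type check.
cd autogpt_platform/frontend
pnpm generate:api:force
pnpm types
```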

## Lighthouse Integration

With this setup complete, the Lighthouse CI integration will work correctly:

- Local testing: `pnpm lighthouse:local`
- Full audit: `pnpm lighthouse`
- CI integration: Automatic via GitHub Actions

99  autogpt_platform/frontend/LIGHTHOUSE.md  (new file)

@@ -0,0 +1,99 @@

# Lighthouse CI Setup

This document describes the Google Lighthouse CI integration for the AutoGPT Platform frontend.

## Overview

Google Lighthouse runs automatically in CI on every push to the `master` or `dev` branches and on pull requests. It audits the application's performance, accessibility, SEO, and best practices.

## Configuration

### Main Configuration (`lighthouserc.json`)

- **URLs audited**: `/`, `/build`, `/login`
- **Number of runs**: 3 per URL (for consistent results)
- **Thresholds**:
  - Performance: 70% (warn)
  - Accessibility: 90% (error)
  - Best Practices: 85% (warn)
  - SEO: 85% (warn)
  - PWA: Disabled

### Local Testing (`lighthouserc.local.json`)

- Lower thresholds for development testing
- Single run per URL for faster feedback

## Usage

### CI Integration

Lighthouse runs automatically in the `lighthouse` job of the `platform-frontend-ci.yml` workflow. Reports are uploaded as CI artifacts.
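
To pull those artifacts onto your machine, one option is the GitHub CLI (assumed to be installed; `<run-id>` is a placeholder for the Actions run you want):

```bash
# Download the "lighthouse-reports" artifact uploaded by the lighthouse job.
gh run download <run-id> --name lighthouse-reports --dir ./lhci_reports
```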

### Local Development

```bash
# Start the development server
pnpm dev

# In another terminal, run Lighthouse (with more lenient thresholds)
pnpm lighthouse:local
```

### Production Testing

```bash
# Build and start the application
pnpm build && pnpm start

# In another terminal, run the full Lighthouse audit
pnpm lighthouse
```

## Reports

- **CI**: Reports are stored as artifacts and can be downloaded from the GitHub Actions run page
- **Local**: Reports are saved to the `lhci_reports/` directory
- **Format**: Both HTML and JSON reports are generated
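
After a local run, a quick way to inspect the output (uses macOS `open`; substitute `xdg-open` on Linux):

```bash
# List the generated reports and open the newest HTML one in a browser.
ls lhci_reports/
open "$(ls -t lhci_reports/*.html | head -n 1)"
```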

## Customization

To modify the audit configuration:

1. **URLs**: Edit the `collect.url` array in `lighthouserc.json`
2. **Thresholds**: Adjust the `assert.assertions` values
3. **Categories**: Add or remove audit categories as needed
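
For one-off experiments you may not need to edit the file at all: `lhci autorun` generally accepts dot-notation flags that override the same keys. A sketch (flag names mirror `lighthouserc.json`; verify against `lhci autorun --help` before relying on them):

```bash
# Ad-hoc run against a single URL with one pass, without touching lighthouserc.json.
pnpm exec lhci autorun --collect.url=http://localhost:3000/ --collect.numberOfRuns=1
```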

## Troubleshooting

### Common Issues

1. **Frontend not ready**: Ensure the application is fully started before running Lighthouse
2. **Memory issues**: Lighthouse can be memory-intensive; ensure sufficient resources
3. **Network timeouts**: Check that all services are healthy before running audits

### Debug Commands

```bash
# Check configuration validity
pnpm lhci healthcheck

# Run only collection (no assertions)
pnpm lhci collect

# Run only assertions on existing reports
pnpm lhci assert
```

## Performance Guidelines

- **Performance scores below 70%** will generate warnings
- **Accessibility scores below 90%** will fail the CI
- Focus on critical user paths for consistent performance
- Consider performance budgets for key metrics

## Integration with Development Workflow

1. **Pre-merge**: Lighthouse runs on all PRs to catch regressions
2. **Post-merge**: Results on `master`/`dev` establish performance baselines
3. **Monitoring**: Track performance trends over time via CI artifacts

26  autogpt_platform/frontend/lighthouserc.json  (new file)

@@ -0,0 +1,26 @@

```json
{
  "ci": {
    "collect": {
      "url": [
        "http://localhost:3000/",
        "http://localhost:3000/build",
        "http://localhost:3000/login"
      ],
      "numberOfRuns": 3
    },
    "assert": {
      "assertions": {
        "categories:performance": ["warn", { "minScore": 0.7 }],
        "categories:accessibility": ["error", { "minScore": 0.9 }],
        "categories:best-practices": ["warn", { "minScore": 0.85 }],
        "categories:seo": ["warn", { "minScore": 0.85 }],
        "categories:pwa": "off"
      }
    },
    "upload": {
      "target": "filesystem",
      "outputDir": "./lhci_reports",
      "reportFilenamePattern": "%%PATHNAME%%-%%DATETIME%%-report.%%EXTENSION%%"
    }
  }
}
```

22  autogpt_platform/frontend/lighthouserc.local.json  (new file)

@@ -0,0 +1,22 @@

```json
{
  "ci": {
    "collect": {
      "url": ["http://localhost:3000/"],
      "numberOfRuns": 1
    },
    "assert": {
      "assertions": {
        "categories:performance": ["warn", { "minScore": 0.6 }],
        "categories:accessibility": ["warn", { "minScore": 0.8 }],
        "categories:best-practices": ["warn", { "minScore": 0.7 }],
        "categories:seo": ["warn", { "minScore": 0.7 }],
        "categories:pwa": "off"
      }
    },
    "upload": {
      "target": "filesystem",
      "outputDir": "./lhci_reports",
      "reportFilenamePattern": "local-%%PATHNAME%%-%%DATETIME%%-report.%%EXTENSION%%"
    }
  }
}
```

@@ -18,6 +18,8 @@

```json
    "build-storybook": "storybook build",
    "test-storybook": "test-storybook",
    "test-storybook:ci": "concurrently -k -s first -n \"SB,TEST\" -c \"magenta,blue\" \"pnpm run build-storybook -- --quiet && npx http-server storybook-static --port 6006 --silent\" \"wait-on tcp:6006 && pnpm run test-storybook\"",
    "lighthouse": "lhci autorun",
    "lighthouse:local": "lhci autorun --config=./lighthouserc.local.json",
    "generate:api": "npx --yes tsx ./scripts/generate-api-queries.ts && orval --config ./orval.config.ts",
    "generate:api:force": "npx --yes tsx ./scripts/generate-api-queries.ts --force && orval --config ./orval.config.ts"
  },
```

@@ -107,6 +109,7 @@

```json
  },
  "devDependencies": {
    "@chromatic-com/storybook": "4.1.1",
    "@lhci/cli": "^0.15.0",
    "@playwright/test": "1.55.0",
    "@storybook/addon-a11y": "9.1.5",
    "@storybook/addon-docs": "9.1.5",
```

1709  autogpt_platform/frontend/pnpm-lock.yaml  (generated)

File diff suppressed because it is too large