mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-12 16:48:06 -05:00
Compare commits
1 Commits
copilot/fi
...
toggle-cor
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
50f39e55f7 |
@@ -3,7 +3,6 @@ name: AutoGPT Platform - Deploy Prod Environment
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: 'read'
|
||||
@@ -18,8 +17,6 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.ref_name || 'master' }}
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
@@ -39,7 +36,7 @@ jobs:
|
||||
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
|
||||
|
||||
|
||||
|
||||
trigger:
|
||||
needs: migrate
|
||||
runs-on: ubuntu-latest
|
||||
@@ -50,5 +47,4 @@ jobs:
|
||||
token: ${{ secrets.DEPLOY_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: build_deploy_prod
|
||||
client-payload: |
|
||||
{"ref": "${{ github.ref_name || 'master' }}", "repository": "${{ github.repository }}"}
|
||||
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
|
||||
@@ -5,13 +5,6 @@ on:
|
||||
branches: [ dev ]
|
||||
paths:
|
||||
- 'autogpt_platform/**'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
git_ref:
|
||||
description: 'Git ref (branch/tag) of AutoGPT to deploy'
|
||||
required: true
|
||||
default: 'master'
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: 'read'
|
||||
@@ -26,8 +19,6 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.inputs.git_ref || github.ref_name }}
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
@@ -57,4 +48,4 @@ jobs:
|
||||
token: ${{ secrets.DEPLOY_TOKEN }}
|
||||
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
|
||||
event-type: build_deploy_dev
|
||||
client-payload: '{"ref": "${{ github.event.inputs.git_ref || github.ref }}", "repository": "${{ github.repository }}"}'
|
||||
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
|
||||
|
||||
113
.github/workflows/platform-container-publish.yml
vendored
113
.github/workflows/platform-container-publish.yml
vendored
@@ -1,113 +0,0 @@
|
||||
name: Platform - Container Publishing
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
no_cache:
|
||||
type: boolean
|
||||
description: 'Build from scratch, without using cached layers'
|
||||
default: false
|
||||
registry:
|
||||
type: choice
|
||||
description: 'Container registry to publish to'
|
||||
options:
|
||||
- 'both'
|
||||
- 'ghcr'
|
||||
- 'dockerhub'
|
||||
default: 'both'
|
||||
|
||||
env:
|
||||
GHCR_REGISTRY: ghcr.io
|
||||
GHCR_IMAGE_BASE: ${{ github.repository_owner }}/autogpt-platform
|
||||
DOCKERHUB_IMAGE_BASE: ${{ secrets.DOCKER_USER }}/autogpt-platform
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
build-and-publish:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
component: [backend, frontend]
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to GitHub Container Registry
|
||||
if: inputs.registry == 'both' || inputs.registry == 'ghcr' || github.event_name == 'release'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.GHCR_REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
if: (inputs.registry == 'both' || inputs.registry == 'dockerhub' || github.event_name == 'release') && secrets.DOCKER_USER
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USER }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Extract metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
${{ env.GHCR_REGISTRY }}/${{ env.GHCR_IMAGE_BASE }}-${{ matrix.component }}
|
||||
${{ secrets.DOCKER_USER && format('{0}-{1}', env.DOCKERHUB_IMAGE_BASE, matrix.component) || '' }}
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=ref,event=pr
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=semver,pattern={{major}}
|
||||
type=raw,value=latest,enable={{is_default_branch}}
|
||||
|
||||
- name: Set build context and dockerfile for backend
|
||||
if: matrix.component == 'backend'
|
||||
run: |
|
||||
echo "BUILD_CONTEXT=." >> $GITHUB_ENV
|
||||
echo "DOCKERFILE=autogpt_platform/backend/Dockerfile" >> $GITHUB_ENV
|
||||
echo "BUILD_TARGET=server" >> $GITHUB_ENV
|
||||
|
||||
- name: Set build context and dockerfile for frontend
|
||||
if: matrix.component == 'frontend'
|
||||
run: |
|
||||
echo "BUILD_CONTEXT=." >> $GITHUB_ENV
|
||||
echo "DOCKERFILE=autogpt_platform/frontend/Dockerfile" >> $GITHUB_ENV
|
||||
echo "BUILD_TARGET=prod" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and push container image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: ${{ env.BUILD_CONTEXT }}
|
||||
file: ${{ env.DOCKERFILE }}
|
||||
target: ${{ env.BUILD_TARGET }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=platform-${{ matrix.component }}
|
||||
cache-to: type=gha,scope=platform-${{ matrix.component }},mode=max
|
||||
|
||||
- name: Generate build summary
|
||||
run: |
|
||||
echo "## 🐳 Container Build Summary" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Component:** ${{ matrix.component }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Registry:** ${{ inputs.registry || 'both' }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Tags:** ${{ steps.meta.outputs.tags }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "### Images Published:" >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
echo "${{ steps.meta.outputs.tags }}" | sed 's/,/\n/g' >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
@@ -1,389 +0,0 @@
|
||||
# AutoGPT Platform Container Publishing
|
||||
|
||||
This document describes the container publishing infrastructure and deployment options for the AutoGPT Platform.
|
||||
|
||||
## Published Container Images
|
||||
|
||||
### GitHub Container Registry (GHCR) - Recommended
|
||||
|
||||
- **Backend**: `ghcr.io/significant-gravitas/autogpt-platform-backend`
|
||||
- **Frontend**: `ghcr.io/significant-gravitas/autogpt-platform-frontend`
|
||||
|
||||
### Docker Hub
|
||||
|
||||
- **Backend**: `significantgravitas/autogpt-platform-backend`
|
||||
- **Frontend**: `significantgravitas/autogpt-platform-frontend`
|
||||
|
||||
## Available Tags
|
||||
|
||||
- `latest` - Latest stable release from master branch
|
||||
- `v1.0.0`, `v1.1.0`, etc. - Specific version releases
|
||||
- `main` - Latest development build (use with caution)
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Using Docker Compose (Recommended)
|
||||
|
||||
```bash
|
||||
# Clone the repository (or just download the compose file)
|
||||
git clone https://github.com/Significant-Gravitas/AutoGPT.git
|
||||
cd AutoGPT/autogpt_platform
|
||||
|
||||
# Deploy with published images
|
||||
./deploy.sh deploy
|
||||
```
|
||||
|
||||
### Manual Docker Run
|
||||
|
||||
```bash
|
||||
# Start dependencies first
|
||||
docker network create autogpt
|
||||
|
||||
# PostgreSQL
|
||||
docker run -d --name postgres --network autogpt \
|
||||
-e POSTGRES_DB=autogpt \
|
||||
-e POSTGRES_USER=autogpt \
|
||||
-e POSTGRES_PASSWORD=password \
|
||||
-v postgres_data:/var/lib/postgresql/data \
|
||||
postgres:15
|
||||
|
||||
# Redis
|
||||
docker run -d --name redis --network autogpt \
|
||||
-v redis_data:/data \
|
||||
redis:7-alpine redis-server --requirepass password
|
||||
|
||||
# RabbitMQ
|
||||
docker run -d --name rabbitmq --network autogpt \
|
||||
-e RABBITMQ_DEFAULT_USER=autogpt \
|
||||
-e RABBITMQ_DEFAULT_PASS=password \
|
||||
-p 15672:15672 \
|
||||
rabbitmq:3-management
|
||||
|
||||
# Backend
|
||||
docker run -d --name backend --network autogpt \
|
||||
-p 8000:8000 \
|
||||
-e DATABASE_URL=postgresql://autogpt:password@postgres:5432/autogpt \
|
||||
-e REDIS_HOST=redis \
|
||||
-e RABBITMQ_HOST=rabbitmq \
|
||||
ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
|
||||
# Frontend
|
||||
docker run -d --name frontend --network autogpt \
|
||||
-p 3000:3000 \
|
||||
-e AGPT_SERVER_URL=http://localhost:8000/api \
|
||||
ghcr.io/significant-gravitas/autogpt-platform-frontend:latest
|
||||
```
|
||||
|
||||
## Deployment Scripts
|
||||
|
||||
### Deploy Script
|
||||
|
||||
The included `deploy.sh` script provides a complete deployment solution:
|
||||
|
||||
```bash
|
||||
# Basic deployment
|
||||
./deploy.sh deploy
|
||||
|
||||
# Deploy specific version
|
||||
./deploy.sh -v v1.0.0 deploy
|
||||
|
||||
# Deploy from Docker Hub
|
||||
./deploy.sh -r docker.io deploy
|
||||
|
||||
# Production deployment
|
||||
./deploy.sh -p production deploy
|
||||
|
||||
# Other operations
|
||||
./deploy.sh start # Start services
|
||||
./deploy.sh stop # Stop services
|
||||
./deploy.sh restart # Restart services
|
||||
./deploy.sh update # Update to latest
|
||||
./deploy.sh backup # Create backup
|
||||
./deploy.sh status # Show status
|
||||
./deploy.sh logs # Show logs
|
||||
./deploy.sh cleanup # Remove everything
|
||||
```
|
||||
|
||||
## Platform-Specific Deployment Guides
|
||||
|
||||
### Unraid
|
||||
|
||||
See [Unraid Deployment Guide](../docs/content/platform/deployment/unraid.md)
|
||||
|
||||
Key features:
|
||||
- Community Applications template
|
||||
- Web UI management
|
||||
- Automatic updates
|
||||
- Built-in backup system
|
||||
|
||||
### Home Assistant Add-on
|
||||
|
||||
See [Home Assistant Add-on Guide](../docs/content/platform/deployment/home-assistant.md)
|
||||
|
||||
Key features:
|
||||
- Native Home Assistant integration
|
||||
- Automation services
|
||||
- Entity monitoring
|
||||
- Backup integration
|
||||
|
||||
### Kubernetes
|
||||
|
||||
See [Kubernetes Deployment Guide](../docs/content/platform/deployment/kubernetes.md)
|
||||
|
||||
Key features:
|
||||
- Helm charts
|
||||
- Horizontal scaling
|
||||
- Health checks
|
||||
- Persistent volumes
|
||||
|
||||
## Container Architecture
|
||||
|
||||
### Backend Container
|
||||
|
||||
- **Base Image**: `debian:13-slim`
|
||||
- **Runtime**: Python 3.13 with Poetry
|
||||
- **Services**: REST API, WebSocket, Executor, Scheduler, Database Manager, Notification
|
||||
- **Ports**: 8000-8007 (depending on service)
|
||||
- **Health Check**: `GET /health`
|
||||
|
||||
### Frontend Container
|
||||
|
||||
- **Base Image**: `node:21-alpine`
|
||||
- **Runtime**: Next.js production build
|
||||
- **Port**: 3000
|
||||
- **Health Check**: HTTP 200 on root path
|
||||
|
||||
## Environment Configuration
|
||||
|
||||
### Required Environment Variables
|
||||
|
||||
#### Backend
|
||||
```env
|
||||
DATABASE_URL=postgresql://user:pass@host:5432/db
|
||||
REDIS_HOST=redis
|
||||
RABBITMQ_HOST=rabbitmq
|
||||
JWT_SECRET=your-secret-key
|
||||
```
|
||||
|
||||
#### Frontend
|
||||
```env
|
||||
AGPT_SERVER_URL=http://backend:8000/api
|
||||
SUPABASE_URL=http://auth:8000
|
||||
```
|
||||
|
||||
### Optional Configuration
|
||||
|
||||
```env
|
||||
# Logging
|
||||
LOG_LEVEL=INFO
|
||||
ENABLE_DEBUG=false
|
||||
|
||||
# Performance
|
||||
REDIS_PASSWORD=your-redis-password
|
||||
RABBITMQ_PASSWORD=your-rabbitmq-password
|
||||
|
||||
# Security
|
||||
CORS_ORIGINS=http://localhost:3000
|
||||
```
|
||||
|
||||
## CI/CD Pipeline
|
||||
|
||||
### GitHub Actions Workflow
|
||||
|
||||
The publishing workflow (`.github/workflows/platform-container-publish.yml`) automatically:
|
||||
|
||||
1. **Triggers** on releases and manual dispatch
|
||||
2. **Builds** both backend and frontend containers
|
||||
3. **Tests** container functionality
|
||||
4. **Publishes** to both GHCR and Docker Hub
|
||||
5. **Tags** with version and latest
|
||||
|
||||
### Manual Publishing
|
||||
|
||||
```bash
|
||||
# Build and tag locally
|
||||
docker build -t ghcr.io/significant-gravitas/autogpt-platform-backend:latest \
|
||||
-f autogpt_platform/backend/Dockerfile \
|
||||
--target server .
|
||||
|
||||
docker build -t ghcr.io/significant-gravitas/autogpt-platform-frontend:latest \
|
||||
-f autogpt_platform/frontend/Dockerfile \
|
||||
--target prod .
|
||||
|
||||
# Push to registry
|
||||
docker push ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
docker push ghcr.io/significant-gravitas/autogpt-platform-frontend:latest
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Container Security
|
||||
|
||||
1. **Non-root users** - Containers run as non-root
|
||||
2. **Minimal base images** - Using slim/alpine images
|
||||
3. **No secrets in images** - All secrets via environment variables
|
||||
4. **Read-only filesystem** - Where possible
|
||||
5. **Resource limits** - CPU and memory limits set
|
||||
|
||||
### Deployment Security
|
||||
|
||||
1. **Network isolation** - Use dedicated networks
|
||||
2. **TLS encryption** - Enable HTTPS in production
|
||||
3. **Secret management** - Use Docker secrets or external secret stores
|
||||
4. **Regular updates** - Keep images updated
|
||||
5. **Vulnerability scanning** - Regular security scans
|
||||
|
||||
## Monitoring
|
||||
|
||||
### Health Checks
|
||||
|
||||
All containers include health checks:
|
||||
|
||||
```bash
|
||||
# Check container health
|
||||
docker inspect --format='{{.State.Health.Status}}' container_name
|
||||
|
||||
# Manual health check
|
||||
curl http://localhost:8000/health
|
||||
```
|
||||
|
||||
### Metrics
|
||||
|
||||
The backend exposes Prometheus metrics at `/metrics`:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8000/metrics
|
||||
```
|
||||
|
||||
### Logging
|
||||
|
||||
Containers log to stdout/stderr for easy aggregation:
|
||||
|
||||
```bash
|
||||
# View logs
|
||||
docker logs container_name
|
||||
|
||||
# Follow logs
|
||||
docker logs -f container_name
|
||||
|
||||
# Aggregate logs
|
||||
docker compose logs -f
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Container won't start**
|
||||
```bash
|
||||
# Check logs
|
||||
docker logs container_name
|
||||
|
||||
# Check environment
|
||||
docker exec container_name env
|
||||
```
|
||||
|
||||
2. **Database connection failed**
|
||||
```bash
|
||||
# Test connectivity
|
||||
docker exec backend ping postgres
|
||||
|
||||
# Check database status
|
||||
docker exec postgres pg_isready
|
||||
```
|
||||
|
||||
3. **Port conflicts**
|
||||
```bash
|
||||
# Check port usage
|
||||
ss -tuln | grep :3000
|
||||
|
||||
# Use different ports
|
||||
docker run -p 3001:3000 ...
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Enable debug mode for detailed logging:
|
||||
|
||||
```env
|
||||
LOG_LEVEL=DEBUG
|
||||
ENABLE_DEBUG=true
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Resource Limits
|
||||
|
||||
```yaml
|
||||
# Docker Compose
|
||||
services:
|
||||
backend:
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 2G
|
||||
cpus: '1.0'
|
||||
reservations:
|
||||
memory: 1G
|
||||
cpus: '0.5'
|
||||
```
|
||||
|
||||
### Scaling
|
||||
|
||||
```bash
|
||||
# Scale backend services
|
||||
docker compose up -d --scale backend=3
|
||||
|
||||
# Or use Docker Swarm
|
||||
docker service scale backend=3
|
||||
```
|
||||
|
||||
## Backup and Recovery
|
||||
|
||||
### Data Backup
|
||||
|
||||
```bash
|
||||
# Database backup
|
||||
docker exec postgres pg_dump -U autogpt autogpt > backup.sql
|
||||
|
||||
# Volume backup
|
||||
docker run --rm -v postgres_data:/data -v $(pwd):/backup \
|
||||
alpine tar czf /backup/postgres_backup.tar.gz /data
|
||||
```
|
||||
|
||||
### Restore
|
||||
|
||||
```bash
|
||||
# Database restore
|
||||
docker exec -i postgres psql -U autogpt autogpt < backup.sql
|
||||
|
||||
# Volume restore
|
||||
docker run --rm -v postgres_data:/data -v $(pwd):/backup \
|
||||
alpine tar xzf /backup/postgres_backup.tar.gz -C /
|
||||
```
|
||||
|
||||
## Support
|
||||
|
||||
- **Documentation**: [Platform Docs](../docs/content/platform/)
|
||||
- **Issues**: [GitHub Issues](https://github.com/Significant-Gravitas/AutoGPT/issues)
|
||||
- **Discord**: [AutoGPT Community](https://discord.gg/autogpt)
|
||||
- **Docker Hub**: [Container Registry](https://hub.docker.com/r/significantgravitas/)
|
||||
|
||||
## Contributing
|
||||
|
||||
To contribute to the container infrastructure:
|
||||
|
||||
1. **Test locally** with `docker build` and `docker run`
|
||||
2. **Update documentation** if making changes
|
||||
3. **Test deployment scripts** on your platform
|
||||
4. **Submit PR** with clear description of changes
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [ ] ARM64 support for Apple Silicon
|
||||
- [ ] Helm charts for Kubernetes
|
||||
- [ ] Official Unraid template
|
||||
- [ ] Home Assistant Add-on store submission
|
||||
- [ ] Multi-stage builds optimization
|
||||
- [ ] Security scanning integration
|
||||
- [ ] Performance benchmarking
|
||||
@@ -2,38 +2,16 @@
|
||||
|
||||
Welcome to the AutoGPT Platform - a powerful system for creating and running AI agents to solve business problems. This platform enables you to harness the power of artificial intelligence to automate tasks, analyze data, and generate insights for your organization.
|
||||
|
||||
## Deployment Options
|
||||
|
||||
### Quick Deploy with Published Containers (Recommended)
|
||||
|
||||
The fastest way to get started is using our pre-built containers:
|
||||
|
||||
```bash
|
||||
# Download and run with published images
|
||||
curl -fsSL https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/autogpt_platform/deploy.sh -o deploy.sh
|
||||
chmod +x deploy.sh
|
||||
./deploy.sh deploy
|
||||
```
|
||||
|
||||
Access the platform at http://localhost:3000 after deployment completes.
|
||||
|
||||
### Platform-Specific Deployments
|
||||
|
||||
- **Unraid**: [Deployment Guide](../docs/content/platform/deployment/unraid.md)
|
||||
- **Home Assistant**: [Add-on Guide](../docs/content/platform/deployment/home-assistant.md)
|
||||
- **Kubernetes**: [K8s Deployment](../docs/content/platform/deployment/kubernetes.md)
|
||||
- **General Containers**: [Container Guide](../docs/content/platform/container-deployment.md)
|
||||
|
||||
## Development Setup
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Docker
|
||||
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
|
||||
|
||||
### Running from Source
|
||||
### Running the System
|
||||
|
||||
To run the AutoGPT Platform from source for development:
|
||||
To run the AutoGPT Platform, follow these steps:
|
||||
|
||||
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
|
||||
|
||||
@@ -179,28 +157,3 @@ If you need to update the API client after making changes to the backend API:
|
||||
```
|
||||
|
||||
This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.
|
||||
|
||||
## Container Deployment
|
||||
|
||||
For production deployments and specific platforms, see our container deployment guides:
|
||||
|
||||
- **[Container Deployment Overview](CONTAINERS.md)** - Complete guide to using published containers
|
||||
- **[Deployment Script](deploy.sh)** - Automated deployment and management tool
|
||||
- **[Published Images](docker-compose.published.yml)** - Docker Compose for published containers
|
||||
|
||||
### Published Container Images
|
||||
|
||||
- **Backend**: `ghcr.io/significant-gravitas/autogpt-platform-backend:latest`
|
||||
- **Frontend**: `ghcr.io/significant-gravitas/autogpt-platform-frontend:latest`
|
||||
|
||||
### Quick Production Deployment
|
||||
|
||||
```bash
|
||||
# Deploy with published containers
|
||||
./deploy.sh deploy
|
||||
|
||||
# Or use the published compose file directly
|
||||
docker compose -f docker-compose.published.yml up -d
|
||||
```
|
||||
|
||||
For detailed deployment instructions, troubleshooting, and platform-specific guides, see the [Container Documentation](CONTAINERS.md).
|
||||
|
||||
@@ -241,7 +241,6 @@ class AirtableCreateRecordsBlock(Block):
|
||||
|
||||
class Output(BlockSchema):
|
||||
records: list[dict] = SchemaField(description="Array of created record objects")
|
||||
details: dict = SchemaField(description="Details of the created records")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
@@ -280,9 +279,6 @@ class AirtableCreateRecordsBlock(Block):
|
||||
result_records = normalized_data["records"]
|
||||
|
||||
yield "records", result_records
|
||||
details = data.get("details", None)
|
||||
if details:
|
||||
yield "details", details
|
||||
|
||||
|
||||
class AirtableUpdateRecordsBlock(Block):
|
||||
|
||||
@@ -896,7 +896,6 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
prompt = [json.to_dict(p) for p in input_data.conversation_history]
|
||||
|
||||
def trim_prompt(s: str) -> str:
|
||||
"""Removes indentation up to and including `|` from a multi-line prompt."""
|
||||
lines = s.strip().split("\n")
|
||||
return "\n".join([line.strip().lstrip("|") for line in lines])
|
||||
|
||||
@@ -910,25 +909,24 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
|
||||
if input_data.expected_format:
|
||||
expected_format = [
|
||||
f"{json.dumps(k)}: {json.dumps(v)}"
|
||||
for k, v in input_data.expected_format.items()
|
||||
f'"{k}": "{v}"' for k, v in input_data.expected_format.items()
|
||||
]
|
||||
if input_data.list_result:
|
||||
format_prompt = (
|
||||
f'"results": [\n {{\n {", ".join(expected_format)}\n }}\n]'
|
||||
)
|
||||
else:
|
||||
format_prompt = ",\n| ".join(expected_format)
|
||||
format_prompt = "\n ".join(expected_format)
|
||||
|
||||
sys_prompt = trim_prompt(
|
||||
f"""
|
||||
|Reply with pure JSON strictly following this JSON format:
|
||||
|{{
|
||||
| {format_prompt}
|
||||
|}}
|
||||
|
|
||||
|Ensure the response is valid JSON. DO NOT include any additional text (e.g. markdown code block fences) outside of the JSON.
|
||||
|If you cannot provide all the keys, provide an empty string for the values you cannot answer.
|
||||
|Reply strictly only in the following JSON format:
|
||||
|{{
|
||||
| {format_prompt}
|
||||
|}}
|
||||
|
|
||||
|Ensure the response is valid JSON. Do not include any additional text outside of the JSON.
|
||||
|If you cannot provide all the keys, provide an empty string for the values you cannot answer.
|
||||
"""
|
||||
)
|
||||
prompt.append({"role": "system", "content": sys_prompt})
|
||||
@@ -948,7 +946,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
return f"JSON decode error: {e}"
|
||||
|
||||
logger.debug(f"LLM request: {prompt}")
|
||||
error_feedback_message = ""
|
||||
retry_prompt = ""
|
||||
llm_model = input_data.model
|
||||
|
||||
for retry_count in range(input_data.retry):
|
||||
@@ -972,25 +970,8 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
logger.debug(f"LLM attempt-{retry_count} response: {response_text}")
|
||||
|
||||
if input_data.expected_format:
|
||||
try:
|
||||
response_obj = json.loads(response_text)
|
||||
except JSONDecodeError as json_error:
|
||||
prompt.append({"role": "assistant", "content": response_text})
|
||||
|
||||
indented_json_error = str(json_error).replace("\n", "\n|")
|
||||
error_feedback_message = trim_prompt(
|
||||
f"""
|
||||
|Your previous response could not be parsed as valid JSON:
|
||||
|
|
||||
|{indented_json_error}
|
||||
|
|
||||
|Please provide a valid JSON response that matches the expected format.
|
||||
"""
|
||||
)
|
||||
prompt.append(
|
||||
{"role": "user", "content": error_feedback_message}
|
||||
)
|
||||
continue
|
||||
response_obj = json.loads(response_text)
|
||||
|
||||
if input_data.list_result and isinstance(response_obj, dict):
|
||||
if "results" in response_obj:
|
||||
@@ -998,7 +979,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
elif len(response_obj) == 1:
|
||||
response_obj = list(response_obj.values())
|
||||
|
||||
validation_errors = "\n".join(
|
||||
response_error = "\n".join(
|
||||
[
|
||||
validation_error
|
||||
for response_item in (
|
||||
@@ -1010,7 +991,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
]
|
||||
)
|
||||
|
||||
if not validation_errors:
|
||||
if not response_error:
|
||||
self.merge_stats(
|
||||
NodeExecutionStats(
|
||||
llm_call_count=retry_count + 1,
|
||||
@@ -1020,16 +1001,6 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
yield "response", response_obj
|
||||
yield "prompt", self.prompt
|
||||
return
|
||||
|
||||
prompt.append({"role": "assistant", "content": response_text})
|
||||
error_feedback_message = trim_prompt(
|
||||
f"""
|
||||
|Your response did not match the expected format:
|
||||
|
|
||||
|{validation_errors}
|
||||
"""
|
||||
)
|
||||
prompt.append({"role": "user", "content": error_feedback_message})
|
||||
else:
|
||||
self.merge_stats(
|
||||
NodeExecutionStats(
|
||||
@@ -1040,6 +1011,21 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
yield "response", {"response": response_text}
|
||||
yield "prompt", self.prompt
|
||||
return
|
||||
|
||||
retry_prompt = trim_prompt(
|
||||
f"""
|
||||
|This is your previous error response:
|
||||
|--
|
||||
|{response_text}
|
||||
|--
|
||||
|
|
||||
|And this is the error:
|
||||
|--
|
||||
|{response_error}
|
||||
|--
|
||||
"""
|
||||
)
|
||||
prompt.append({"role": "user", "content": retry_prompt})
|
||||
except Exception as e:
|
||||
logger.exception(f"Error calling LLM: {e}")
|
||||
if (
|
||||
@@ -1052,12 +1038,9 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
logger.debug(
|
||||
f"Reducing max_tokens to {input_data.max_tokens} for next attempt"
|
||||
)
|
||||
# Don't add retry prompt for token limit errors,
|
||||
# just retry with lower maximum output tokens
|
||||
retry_prompt = f"Error calling LLM: {e}"
|
||||
|
||||
error_feedback_message = f"Error calling LLM: {e}"
|
||||
|
||||
raise RuntimeError(error_feedback_message)
|
||||
raise RuntimeError(retry_prompt)
|
||||
|
||||
|
||||
class AITextGeneratorBlock(AIBlockBase):
|
||||
|
||||
@@ -92,31 +92,6 @@ ExecutionStatus = AgentExecutionStatus
|
||||
NodeInputMask = Mapping[str, JsonValue]
|
||||
NodesInputMasks = Mapping[str, NodeInputMask]
|
||||
|
||||
# dest: source
|
||||
VALID_STATUS_TRANSITIONS = {
|
||||
ExecutionStatus.QUEUED: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
],
|
||||
ExecutionStatus.RUNNING: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.TERMINATED, # For resuming halted execution
|
||||
],
|
||||
ExecutionStatus.COMPLETED: [
|
||||
ExecutionStatus.RUNNING,
|
||||
],
|
||||
ExecutionStatus.FAILED: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.RUNNING,
|
||||
],
|
||||
ExecutionStatus.TERMINATED: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.RUNNING,
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
class GraphExecutionMeta(BaseDbModel):
|
||||
id: str # type: ignore # Override base class to make this required
|
||||
@@ -130,8 +105,6 @@ class GraphExecutionMeta(BaseDbModel):
|
||||
status: ExecutionStatus
|
||||
started_at: datetime
|
||||
ended_at: datetime
|
||||
is_shared: bool = False
|
||||
share_token: Optional[str] = None
|
||||
|
||||
class Stats(BaseModel):
|
||||
model_config = ConfigDict(
|
||||
@@ -248,8 +221,6 @@ class GraphExecutionMeta(BaseDbModel):
|
||||
if stats
|
||||
else None
|
||||
),
|
||||
is_shared=_graph_exec.isShared,
|
||||
share_token=_graph_exec.shareToken,
|
||||
)
|
||||
|
||||
|
||||
@@ -609,7 +580,7 @@ async def create_graph_execution(
|
||||
data={
|
||||
"agentGraphId": graph_id,
|
||||
"agentGraphVersion": graph_version,
|
||||
"executionStatus": ExecutionStatus.INCOMPLETE,
|
||||
"executionStatus": ExecutionStatus.QUEUED,
|
||||
"inputs": SafeJson(inputs),
|
||||
"credentialInputs": (
|
||||
SafeJson(credential_inputs) if credential_inputs else Json({})
|
||||
@@ -756,11 +727,6 @@ async def update_graph_execution_stats(
|
||||
status: ExecutionStatus | None = None,
|
||||
stats: GraphExecutionStats | None = None,
|
||||
) -> GraphExecution | None:
|
||||
if not status and not stats:
|
||||
raise ValueError(
|
||||
f"Must provide either status or stats to update for execution {graph_exec_id}"
|
||||
)
|
||||
|
||||
update_data: AgentGraphExecutionUpdateManyMutationInput = {}
|
||||
|
||||
if stats:
|
||||
@@ -772,25 +738,20 @@ async def update_graph_execution_stats(
|
||||
if status:
|
||||
update_data["executionStatus"] = status
|
||||
|
||||
where_clause: AgentGraphExecutionWhereInput = {"id": graph_exec_id}
|
||||
|
||||
if status:
|
||||
if allowed_from := VALID_STATUS_TRANSITIONS.get(status, []):
|
||||
# Add OR clause to check if current status is one of the allowed source statuses
|
||||
where_clause["AND"] = [
|
||||
{"id": graph_exec_id},
|
||||
{"OR": [{"executionStatus": s} for s in allowed_from]},
|
||||
]
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Status {status} cannot be set via update for execution {graph_exec_id}. "
|
||||
f"This status can only be set at creation or is not a valid target status."
|
||||
)
|
||||
|
||||
await AgentGraphExecution.prisma().update_many(
|
||||
where=where_clause,
|
||||
updated_count = await AgentGraphExecution.prisma().update_many(
|
||||
where={
|
||||
"id": graph_exec_id,
|
||||
"OR": [
|
||||
{"executionStatus": ExecutionStatus.RUNNING},
|
||||
{"executionStatus": ExecutionStatus.QUEUED},
|
||||
# Terminated graph can be resumed.
|
||||
{"executionStatus": ExecutionStatus.TERMINATED},
|
||||
],
|
||||
},
|
||||
data=update_data,
|
||||
)
|
||||
if updated_count == 0:
|
||||
return None
|
||||
|
||||
graph_exec = await AgentGraphExecution.prisma().find_unique_or_raise(
|
||||
where={"id": graph_exec_id},
|
||||
@@ -798,7 +759,6 @@ async def update_graph_execution_stats(
|
||||
[*get_io_block_ids(), *get_webhook_block_ids()]
|
||||
),
|
||||
)
|
||||
|
||||
return GraphExecution.from_db(graph_exec)
|
||||
|
||||
|
||||
@@ -1025,18 +985,6 @@ class NodeExecutionEvent(NodeExecutionResult):
|
||||
)
|
||||
|
||||
|
||||
class SharedExecutionResponse(BaseModel):
|
||||
"""Public-safe response for shared executions"""
|
||||
|
||||
id: str
|
||||
graph_name: str
|
||||
graph_description: Optional[str]
|
||||
status: ExecutionStatus
|
||||
created_at: datetime
|
||||
outputs: CompletedBlockOutput # Only the final outputs, no intermediate data
|
||||
# Deliberately exclude: user_id, inputs, credentials, node details
|
||||
|
||||
|
||||
ExecutionEvent = Annotated[
|
||||
GraphExecutionEvent | NodeExecutionEvent, Field(discriminator="event_type")
|
||||
]
|
||||
@@ -1214,98 +1162,3 @@ async def get_block_error_stats(
|
||||
)
|
||||
for row in result
|
||||
]
|
||||
|
||||
|
||||
async def update_graph_execution_share_status(
|
||||
execution_id: str,
|
||||
user_id: str,
|
||||
is_shared: bool,
|
||||
share_token: str | None,
|
||||
shared_at: datetime | None,
|
||||
) -> None:
|
||||
"""Update the sharing status of a graph execution."""
|
||||
await AgentGraphExecution.prisma().update(
|
||||
where={"id": execution_id},
|
||||
data={
|
||||
"isShared": is_shared,
|
||||
"shareToken": share_token,
|
||||
"sharedAt": shared_at,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
async def get_graph_execution_by_share_token(
|
||||
share_token: str,
|
||||
) -> SharedExecutionResponse | None:
|
||||
"""Get a shared execution with limited public-safe data."""
|
||||
execution = await AgentGraphExecution.prisma().find_first(
|
||||
where={
|
||||
"shareToken": share_token,
|
||||
"isShared": True,
|
||||
"isDeleted": False,
|
||||
},
|
||||
include={
|
||||
"AgentGraph": True,
|
||||
"NodeExecutions": {
|
||||
"include": {
|
||||
"Output": True,
|
||||
"Node": {
|
||||
"include": {
|
||||
"AgentBlock": True,
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
if not execution:
|
||||
return None
|
||||
|
||||
# Extract outputs from OUTPUT blocks only (consistent with GraphExecution.from_db)
|
||||
outputs: CompletedBlockOutput = defaultdict(list)
|
||||
if execution.NodeExecutions:
|
||||
for node_exec in execution.NodeExecutions:
|
||||
if node_exec.Node and node_exec.Node.agentBlockId:
|
||||
# Get the block definition to check its type
|
||||
block = get_block(node_exec.Node.agentBlockId)
|
||||
|
||||
if block and block.block_type == BlockType.OUTPUT:
|
||||
# For OUTPUT blocks, the data is stored in executionData or Input
|
||||
# The executionData contains the structured input with 'name' and 'value' fields
|
||||
if hasattr(node_exec, "executionData") and node_exec.executionData:
|
||||
exec_data = type_utils.convert(
|
||||
node_exec.executionData, dict[str, Any]
|
||||
)
|
||||
if "name" in exec_data:
|
||||
name = exec_data["name"]
|
||||
value = exec_data.get("value")
|
||||
outputs[name].append(value)
|
||||
elif node_exec.Input:
|
||||
# Build input_data from Input relation
|
||||
input_data = {}
|
||||
for data in node_exec.Input:
|
||||
if data.name and data.data is not None:
|
||||
input_data[data.name] = type_utils.convert(
|
||||
data.data, JsonValue
|
||||
)
|
||||
|
||||
if "name" in input_data:
|
||||
name = input_data["name"]
|
||||
value = input_data.get("value")
|
||||
outputs[name].append(value)
|
||||
|
||||
return SharedExecutionResponse(
|
||||
id=execution.id,
|
||||
graph_name=(
|
||||
execution.AgentGraph.name
|
||||
if (execution.AgentGraph and execution.AgentGraph.name)
|
||||
else "Untitled Agent"
|
||||
),
|
||||
graph_description=(
|
||||
execution.AgentGraph.description if execution.AgentGraph else None
|
||||
),
|
||||
status=ExecutionStatus(execution.executionStatus),
|
||||
created_at=execution.createdAt,
|
||||
outputs=outputs,
|
||||
)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import logging
|
||||
import uuid
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timezone
|
||||
from typing import TYPE_CHECKING, Any, Literal, Optional, cast
|
||||
|
||||
from prisma.enums import SubmissionStatus
|
||||
@@ -161,7 +160,6 @@ class BaseGraph(BaseDbModel):
|
||||
is_active: bool = True
|
||||
name: str
|
||||
description: str
|
||||
instructions: str | None = None
|
||||
recommended_schedule_cron: str | None = None
|
||||
nodes: list[Node] = []
|
||||
links: list[Link] = []
|
||||
@@ -383,8 +381,6 @@ class GraphModel(Graph):
|
||||
user_id: str
|
||||
nodes: list[NodeModel] = [] # type: ignore
|
||||
|
||||
created_at: datetime
|
||||
|
||||
@property
|
||||
def starting_nodes(self) -> list[NodeModel]:
|
||||
outbound_nodes = {link.sink_id for link in self.links}
|
||||
@@ -397,10 +393,6 @@ class GraphModel(Graph):
|
||||
if node.id not in outbound_nodes or node.id in input_nodes
|
||||
]
|
||||
|
||||
@property
|
||||
def webhook_input_node(self) -> NodeModel | None: # type: ignore
|
||||
return cast(NodeModel, super().webhook_input_node)
|
||||
|
||||
def meta(self) -> "GraphMeta":
|
||||
"""
|
||||
Returns a GraphMeta object with metadata about the graph.
|
||||
@@ -702,11 +694,9 @@ class GraphModel(Graph):
|
||||
version=graph.version,
|
||||
forked_from_id=graph.forkedFromId,
|
||||
forked_from_version=graph.forkedFromVersion,
|
||||
created_at=graph.createdAt,
|
||||
is_active=graph.isActive,
|
||||
name=graph.name or "",
|
||||
description=graph.description or "",
|
||||
instructions=graph.instructions,
|
||||
recommended_schedule_cron=graph.recommendedScheduleCron,
|
||||
nodes=[NodeModel.from_db(node, for_export) for node in graph.Nodes or []],
|
||||
links=list(
|
||||
@@ -1154,7 +1144,6 @@ def make_graph_model(creatable_graph: Graph, user_id: str) -> GraphModel:
|
||||
return GraphModel(
|
||||
**creatable_graph.model_dump(exclude={"nodes"}),
|
||||
user_id=user_id,
|
||||
created_at=datetime.now(tz=timezone.utc),
|
||||
nodes=[
|
||||
NodeModel(
|
||||
**creatable_node.model_dump(),
|
||||
|
||||
@@ -107,7 +107,7 @@ async def generate_activity_status_for_execution(
|
||||
# Check if we have OpenAI API key
|
||||
try:
|
||||
settings = Settings()
|
||||
if not settings.secrets.openai_internal_api_key:
|
||||
if not settings.secrets.openai_api_key:
|
||||
logger.debug(
|
||||
"OpenAI API key not configured, skipping activity status generation"
|
||||
)
|
||||
@@ -187,7 +187,7 @@ async def generate_activity_status_for_execution(
|
||||
credentials = APIKeyCredentials(
|
||||
id="openai",
|
||||
provider="openai",
|
||||
api_key=SecretStr(settings.secrets.openai_internal_api_key),
|
||||
api_key=SecretStr(settings.secrets.openai_api_key),
|
||||
title="System OpenAI",
|
||||
)
|
||||
|
||||
|
||||
@@ -468,7 +468,7 @@ class TestGenerateActivityStatusForExecution:
|
||||
):
|
||||
|
||||
mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id)
|
||||
mock_settings.return_value.secrets.openai_internal_api_key = "test_key"
|
||||
mock_settings.return_value.secrets.openai_api_key = "test_key"
|
||||
mock_llm.return_value = (
|
||||
"I analyzed your data and provided the requested insights."
|
||||
)
|
||||
@@ -520,7 +520,7 @@ class TestGenerateActivityStatusForExecution:
|
||||
"backend.executor.activity_status_generator.is_feature_enabled",
|
||||
return_value=True,
|
||||
):
|
||||
mock_settings.return_value.secrets.openai_internal_api_key = ""
|
||||
mock_settings.return_value.secrets.openai_api_key = ""
|
||||
|
||||
result = await generate_activity_status_for_execution(
|
||||
graph_exec_id="test_exec",
|
||||
@@ -546,7 +546,7 @@ class TestGenerateActivityStatusForExecution:
|
||||
"backend.executor.activity_status_generator.is_feature_enabled",
|
||||
return_value=True,
|
||||
):
|
||||
mock_settings.return_value.secrets.openai_internal_api_key = "test_key"
|
||||
mock_settings.return_value.secrets.openai_api_key = "test_key"
|
||||
|
||||
result = await generate_activity_status_for_execution(
|
||||
graph_exec_id="test_exec",
|
||||
@@ -581,7 +581,7 @@ class TestGenerateActivityStatusForExecution:
|
||||
):
|
||||
|
||||
mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id)
|
||||
mock_settings.return_value.secrets.openai_internal_api_key = "test_key"
|
||||
mock_settings.return_value.secrets.openai_api_key = "test_key"
|
||||
mock_llm.return_value = "Agent completed execution."
|
||||
|
||||
result = await generate_activity_status_for_execution(
|
||||
@@ -633,7 +633,7 @@ class TestIntegration:
|
||||
):
|
||||
|
||||
mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id)
|
||||
mock_settings.return_value.secrets.openai_internal_api_key = "test_key"
|
||||
mock_settings.return_value.secrets.openai_api_key = "test_key"
|
||||
|
||||
mock_response = LLMResponse(
|
||||
raw_response={},
|
||||
|
||||
@@ -605,7 +605,7 @@ class ExecutionProcessor:
|
||||
)
|
||||
return
|
||||
|
||||
if exec_meta.status in [ExecutionStatus.QUEUED, ExecutionStatus.INCOMPLETE]:
|
||||
if exec_meta.status == ExecutionStatus.QUEUED:
|
||||
log_metadata.info(f"⚙️ Starting graph execution #{graph_exec.graph_exec_id}")
|
||||
exec_meta.status = ExecutionStatus.RUNNING
|
||||
send_execution_update(
|
||||
|
||||
@@ -191,22 +191,15 @@ class GraphExecutionJobInfo(GraphExecutionJobArgs):
|
||||
id: str
|
||||
name: str
|
||||
next_run_time: str
|
||||
timezone: str = Field(default="UTC", description="Timezone used for scheduling")
|
||||
|
||||
@staticmethod
|
||||
def from_db(
|
||||
job_args: GraphExecutionJobArgs, job_obj: JobObj
|
||||
) -> "GraphExecutionJobInfo":
|
||||
# Extract timezone from the trigger if it's a CronTrigger
|
||||
timezone_str = "UTC"
|
||||
if hasattr(job_obj.trigger, "timezone"):
|
||||
timezone_str = str(job_obj.trigger.timezone)
|
||||
|
||||
return GraphExecutionJobInfo(
|
||||
id=job_obj.id,
|
||||
name=job_obj.name,
|
||||
next_run_time=job_obj.next_run_time.isoformat(),
|
||||
timezone=timezone_str,
|
||||
**job_args.model_dump(),
|
||||
)
|
||||
|
||||
@@ -402,7 +395,6 @@ class Scheduler(AppService):
|
||||
input_data: BlockInput,
|
||||
input_credentials: dict[str, CredentialsMetaInput],
|
||||
name: Optional[str] = None,
|
||||
user_timezone: str | None = None,
|
||||
) -> GraphExecutionJobInfo:
|
||||
# Validate the graph before scheduling to prevent runtime failures
|
||||
# We don't need the return value, just want the validation to run
|
||||
@@ -416,18 +408,7 @@ class Scheduler(AppService):
|
||||
)
|
||||
)
|
||||
|
||||
# Use provided timezone or default to UTC
|
||||
# Note: Timezone should be passed from the client to avoid database lookups
|
||||
if not user_timezone:
|
||||
user_timezone = "UTC"
|
||||
logger.warning(
|
||||
f"No timezone provided for user {user_id}, using UTC for scheduling. "
|
||||
f"Client should pass user's timezone for correct scheduling."
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Scheduling job for user {user_id} with timezone {user_timezone} (cron: {cron})"
|
||||
)
|
||||
logger.info(f"Scheduling job for user {user_id} in UTC (cron: {cron})")
|
||||
|
||||
job_args = GraphExecutionJobArgs(
|
||||
user_id=user_id,
|
||||
@@ -441,12 +422,12 @@ class Scheduler(AppService):
|
||||
execute_graph,
|
||||
kwargs=job_args.model_dump(),
|
||||
name=name,
|
||||
trigger=CronTrigger.from_crontab(cron, timezone=user_timezone),
|
||||
trigger=CronTrigger.from_crontab(cron, timezone="UTC"),
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
replace_existing=True,
|
||||
)
|
||||
logger.info(
|
||||
f"Added job {job.id} with cron schedule '{cron}' in timezone {user_timezone}, input data: {input_data}"
|
||||
f"Added job {job.id} with cron schedule '{cron}' in UTC, input data: {input_data}"
|
||||
)
|
||||
return GraphExecutionJobInfo.from_db(job_args, job)
|
||||
|
||||
|
||||
@@ -914,30 +914,29 @@ async def add_graph_execution(
|
||||
preset_id=preset_id,
|
||||
)
|
||||
|
||||
# Fetch user context for the graph execution
|
||||
user_context = await get_user_context(user_id)
|
||||
|
||||
queue = await get_async_execution_queue()
|
||||
graph_exec_entry = graph_exec.to_graph_execution_entry(
|
||||
user_context=await get_user_context(user_id),
|
||||
compiled_nodes_input_masks=compiled_nodes_input_masks,
|
||||
user_context, compiled_nodes_input_masks
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Created graph execution #{graph_exec.id} for graph "
|
||||
f"#{graph_id} with {len(starting_nodes_input)} starting nodes. "
|
||||
f"Now publishing to execution queue."
|
||||
)
|
||||
|
||||
exec_queue = await get_async_execution_queue()
|
||||
await exec_queue.publish_message(
|
||||
await queue.publish_message(
|
||||
routing_key=GRAPH_EXECUTION_ROUTING_KEY,
|
||||
message=graph_exec_entry.model_dump_json(),
|
||||
exchange=GRAPH_EXECUTION_EXCHANGE,
|
||||
)
|
||||
logger.info(f"Published execution {graph_exec.id} to RabbitMQ queue")
|
||||
|
||||
graph_exec.status = ExecutionStatus.QUEUED
|
||||
await edb.update_graph_execution_stats(
|
||||
graph_exec_id=graph_exec.id,
|
||||
status=graph_exec.status,
|
||||
)
|
||||
await get_async_execution_event_bus().publish(graph_exec)
|
||||
bus = get_async_execution_event_bus()
|
||||
await bus.publish(graph_exec)
|
||||
|
||||
return graph_exec
|
||||
except BaseException as e:
|
||||
|
||||
@@ -316,7 +316,6 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
|
||||
# Mock the graph execution object
|
||||
mock_graph_exec = mocker.MagicMock(spec=GraphExecutionWithNodes)
|
||||
mock_graph_exec.id = "execution-id-123"
|
||||
mock_graph_exec.node_executions = [] # Add this to avoid AttributeError
|
||||
mock_graph_exec.to_graph_execution_entry.return_value = mocker.MagicMock()
|
||||
|
||||
# Mock user context
|
||||
@@ -347,10 +346,6 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
|
||||
)
|
||||
mock_prisma.is_connected.return_value = True
|
||||
mock_edb.create_graph_execution = mocker.AsyncMock(return_value=mock_graph_exec)
|
||||
mock_edb.update_graph_execution_stats = mocker.AsyncMock(
|
||||
return_value=mock_graph_exec
|
||||
)
|
||||
mock_edb.update_node_execution_status_batch = mocker.AsyncMock()
|
||||
mock_get_user_context.return_value = mock_user_context
|
||||
mock_get_queue.return_value = mock_queue
|
||||
mock_get_event_bus.return_value = mock_event_bus
|
||||
|
||||
@@ -7,9 +7,10 @@ from backend.data.graph import set_node_webhook
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
|
||||
from . import get_webhook_manager, supports_webhooks
|
||||
from .utils import setup_webhook_for_block
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.data.graph import BaseGraph, GraphModel, NodeModel
|
||||
from backend.data.graph import BaseGraph, GraphModel, Node, NodeModel
|
||||
from backend.data.model import Credentials
|
||||
|
||||
from ._base import BaseWebhooksManager
|
||||
@@ -42,19 +43,32 @@ async def _on_graph_activate(graph: "BaseGraph", user_id: str) -> "BaseGraph": .
|
||||
|
||||
async def _on_graph_activate(graph: "BaseGraph | GraphModel", user_id: str):
|
||||
get_credentials = credentials_manager.cached_getter(user_id)
|
||||
updated_nodes = []
|
||||
for new_node in graph.nodes:
|
||||
block_input_schema = cast(BlockSchema, new_node.block.input_schema)
|
||||
|
||||
for creds_field_name in block_input_schema.get_credentials_fields().keys():
|
||||
# Prevent saving graph with non-existent credentials
|
||||
if (
|
||||
creds_meta := new_node.input_default.get(creds_field_name)
|
||||
) and not await get_credentials(creds_meta["id"]):
|
||||
raise ValueError(
|
||||
f"Node #{new_node.id} input '{creds_field_name}' updated with "
|
||||
f"non-existent credentials #{creds_meta['id']}"
|
||||
node_credentials = None
|
||||
if (
|
||||
# Webhook-triggered blocks are only allowed to have 1 credentials input
|
||||
(
|
||||
creds_field_name := next(
|
||||
iter(block_input_schema.get_credentials_fields()), None
|
||||
)
|
||||
)
|
||||
and (creds_meta := new_node.input_default.get(creds_field_name))
|
||||
and not (node_credentials := await get_credentials(creds_meta["id"]))
|
||||
):
|
||||
raise ValueError(
|
||||
f"Node #{new_node.id} input '{creds_field_name}' updated with "
|
||||
f"non-existent credentials #{creds_meta['id']}"
|
||||
)
|
||||
|
||||
updated_node = await on_node_activate(
|
||||
user_id, graph.id, new_node, credentials=node_credentials
|
||||
)
|
||||
updated_nodes.append(updated_node)
|
||||
|
||||
graph.nodes = updated_nodes
|
||||
return graph
|
||||
|
||||
|
||||
@@ -71,14 +85,20 @@ async def on_graph_deactivate(graph: "GraphModel", user_id: str):
|
||||
block_input_schema = cast(BlockSchema, node.block.input_schema)
|
||||
|
||||
node_credentials = None
|
||||
for creds_field_name in block_input_schema.get_credentials_fields().keys():
|
||||
if (creds_meta := node.input_default.get(creds_field_name)) and not (
|
||||
node_credentials := await get_credentials(creds_meta["id"])
|
||||
):
|
||||
logger.warning(
|
||||
f"Node #{node.id} input '{creds_field_name}' referenced "
|
||||
f"non-existent credentials #{creds_meta['id']}"
|
||||
if (
|
||||
# Webhook-triggered blocks are only allowed to have 1 credentials input
|
||||
(
|
||||
creds_field_name := next(
|
||||
iter(block_input_schema.get_credentials_fields()), None
|
||||
)
|
||||
)
|
||||
and (creds_meta := node.input_default.get(creds_field_name))
|
||||
and not (node_credentials := await get_credentials(creds_meta["id"]))
|
||||
):
|
||||
logger.error(
|
||||
f"Node #{node.id} input '{creds_field_name}' referenced non-existent "
|
||||
f"credentials #{creds_meta['id']}"
|
||||
)
|
||||
|
||||
updated_node = await on_node_deactivate(
|
||||
user_id, node, credentials=node_credentials
|
||||
@@ -89,6 +109,32 @@ async def on_graph_deactivate(graph: "GraphModel", user_id: str):
|
||||
return graph
|
||||
|
||||
|
||||
async def on_node_activate(
|
||||
user_id: str,
|
||||
graph_id: str,
|
||||
node: "Node",
|
||||
*,
|
||||
credentials: Optional["Credentials"] = None,
|
||||
) -> "Node":
|
||||
"""Hook to be called when the node is activated/created"""
|
||||
|
||||
if node.block.webhook_config:
|
||||
new_webhook, feedback = await setup_webhook_for_block(
|
||||
user_id=user_id,
|
||||
trigger_block=node.block,
|
||||
trigger_config=node.input_default,
|
||||
for_graph_id=graph_id,
|
||||
)
|
||||
if new_webhook:
|
||||
node = await set_node_webhook(node.id, new_webhook.id)
|
||||
else:
|
||||
logger.debug(
|
||||
f"Node #{node.id} does not have everything for a webhook: {feedback}"
|
||||
)
|
||||
|
||||
return node
|
||||
|
||||
|
||||
async def on_node_deactivate(
|
||||
user_id: str,
|
||||
node: "NodeModel",
|
||||
|
||||
@@ -4,6 +4,7 @@ from typing import TYPE_CHECKING, Optional, cast
|
||||
from pydantic import JsonValue
|
||||
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.settings import Config
|
||||
|
||||
from . import get_webhook_manager, supports_webhooks
|
||||
@@ -12,7 +13,6 @@ if TYPE_CHECKING:
|
||||
from backend.data.block import Block, BlockSchema
|
||||
from backend.data.integrations import Webhook
|
||||
from backend.data.model import Credentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
app_config = Config()
|
||||
@@ -20,7 +20,7 @@ credentials_manager = IntegrationCredentialsManager()
|
||||
|
||||
|
||||
# TODO: add test to assert this matches the actual API route
|
||||
def webhook_ingress_url(provider_name: "ProviderName", webhook_id: str) -> str:
|
||||
def webhook_ingress_url(provider_name: ProviderName, webhook_id: str) -> str:
|
||||
return (
|
||||
f"{app_config.platform_base_url}/api/integrations/{provider_name.value}"
|
||||
f"/webhooks/{webhook_id}/ingress"
|
||||
@@ -144,62 +144,3 @@ async def setup_webhook_for_block(
|
||||
)
|
||||
logger.debug(f"Acquired webhook: {webhook}")
|
||||
return webhook, None
|
||||
|
||||
|
||||
async def migrate_legacy_triggered_graphs():
|
||||
from prisma.models import AgentGraph
|
||||
|
||||
from backend.data.graph import AGENT_GRAPH_INCLUDE, GraphModel, set_node_webhook
|
||||
from backend.data.model import is_credentials_field_name
|
||||
from backend.server.v2.library.db import create_preset
|
||||
from backend.server.v2.library.model import LibraryAgentPresetCreatable
|
||||
|
||||
triggered_graphs = [
|
||||
GraphModel.from_db(_graph)
|
||||
for _graph in await AgentGraph.prisma().find_many(
|
||||
where={
|
||||
"isActive": True,
|
||||
"Nodes": {"some": {"NOT": [{"webhookId": None}]}},
|
||||
},
|
||||
include=AGENT_GRAPH_INCLUDE,
|
||||
)
|
||||
]
|
||||
|
||||
n_migrated_webhooks = 0
|
||||
|
||||
for graph in triggered_graphs:
|
||||
if not ((trigger_node := graph.webhook_input_node) and trigger_node.webhook_id):
|
||||
continue
|
||||
|
||||
# Use trigger node's inputs for the preset
|
||||
preset_credentials = {
|
||||
field_name: creds_meta
|
||||
for field_name, creds_meta in trigger_node.input_default.items()
|
||||
if is_credentials_field_name(field_name)
|
||||
}
|
||||
preset_inputs = {
|
||||
field_name: value
|
||||
for field_name, value in trigger_node.input_default.items()
|
||||
if not is_credentials_field_name(field_name)
|
||||
}
|
||||
|
||||
# Create a triggered preset for the graph
|
||||
await create_preset(
|
||||
graph.user_id,
|
||||
LibraryAgentPresetCreatable(
|
||||
graph_id=graph.id,
|
||||
graph_version=graph.version,
|
||||
inputs=preset_inputs,
|
||||
credentials=preset_credentials,
|
||||
name=graph.name,
|
||||
description=graph.description,
|
||||
webhook_id=trigger_node.webhook_id,
|
||||
is_active=True,
|
||||
),
|
||||
)
|
||||
# Detach webhook from the graph node
|
||||
await set_node_webhook(trigger_node.id, None)
|
||||
|
||||
n_migrated_webhooks += 1
|
||||
|
||||
logger.info(f"Migrated {n_migrated_webhooks} node triggers to triggered presets")
|
||||
|
||||
@@ -1,287 +0,0 @@
|
||||
"""
|
||||
Prometheus instrumentation for FastAPI services.
|
||||
|
||||
This module provides centralized metrics collection and instrumentation
|
||||
for all FastAPI services in the AutoGPT platform.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import FastAPI
|
||||
from prometheus_client import Counter, Gauge, Histogram, Info
|
||||
from prometheus_fastapi_instrumentator import Instrumentator, metrics
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
# Custom business metrics with controlled cardinality
GRAPH_EXECUTIONS = Counter(
    "autogpt_graph_executions_total",
    "Total number of graph executions",
    labelnames=[
        "status"
    ],  # Removed graph_id and user_id to prevent cardinality explosion
)

GRAPH_EXECUTIONS_BY_USER = Counter(
    "autogpt_graph_executions_by_user_total",
    "Total number of graph executions by user (sampled)",
    labelnames=["status"],  # Only status, user_id tracked separately when needed
)

BLOCK_EXECUTIONS = Counter(
    "autogpt_block_executions_total",
    "Total number of block executions",
    labelnames=["block_type", "status"],  # block_type is bounded
)

BLOCK_DURATION = Histogram(
    "autogpt_block_duration_seconds",
    "Duration of block executions in seconds",
    labelnames=["block_type"],
    buckets=[0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60],
)

WEBSOCKET_CONNECTIONS = Gauge(
    "autogpt_websocket_connections_total",
    "Total number of active WebSocket connections",
    # Removed user_id label - track total only to prevent cardinality explosion
)

SCHEDULER_JOBS = Gauge(
    "autogpt_scheduler_jobs",
    "Current number of scheduled jobs",
    labelnames=["job_type", "status"],
)

DATABASE_QUERIES = Histogram(
    "autogpt_database_query_duration_seconds",
    "Duration of database queries in seconds",
    labelnames=["operation", "table"],
    buckets=[0.01, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5],
)

RABBITMQ_MESSAGES = Counter(
    "autogpt_rabbitmq_messages_total",
    "Total number of RabbitMQ messages",
    labelnames=["queue", "status"],
)

AUTHENTICATION_ATTEMPTS = Counter(
    "autogpt_auth_attempts_total",
    "Total number of authentication attempts",
    labelnames=["method", "status"],
)

API_KEY_USAGE = Counter(
    "autogpt_api_key_usage_total",
    "API key usage by provider",
    labelnames=["provider", "block_type", "status"],
)

# Function/operation level metrics with controlled cardinality
GRAPH_OPERATIONS = Counter(
    "autogpt_graph_operations_total",
    "Graph operations by type",
    labelnames=["operation", "status"],  # create, update, delete, execute, etc.
)

USER_OPERATIONS = Counter(
    "autogpt_user_operations_total",
    "User operations by type",
    labelnames=["operation", "status"],  # login, register, update_profile, etc.
)

RATE_LIMIT_HITS = Counter(
    "autogpt_rate_limit_hits_total",
    "Number of rate limit hits",
    labelnames=["endpoint"],  # Removed user_id to prevent cardinality explosion
)

SERVICE_INFO = Info(
    "autogpt_service",
    "Service information",
)


def instrument_fastapi(
    app: FastAPI,
    service_name: str,
    expose_endpoint: bool = True,
    endpoint: str = "/metrics",
    include_in_schema: bool = False,
    excluded_handlers: Optional[list] = None,
) -> Instrumentator:
    """
    Instrument a FastAPI application with Prometheus metrics.

    Args:
        app: FastAPI application instance
        service_name: Name of the service for metrics labeling
        expose_endpoint: Whether to expose /metrics endpoint
        endpoint: Path for metrics endpoint
        include_in_schema: Whether to include metrics endpoint in OpenAPI schema
        excluded_handlers: List of paths to exclude from metrics

    Returns:
        Configured Instrumentator instance
    """

    # Set service info
    try:
        from importlib.metadata import version

        service_version = version("autogpt-platform-backend")
    except Exception:
        service_version = "unknown"

    SERVICE_INFO.info(
        {
            "service": service_name,
            "version": service_version,
        }
    )

    # Create instrumentator with default metrics
    instrumentator = Instrumentator(
        should_group_status_codes=True,
        should_ignore_untemplated=True,
        should_respect_env_var=True,
        should_instrument_requests_inprogress=True,
        excluded_handlers=excluded_handlers or ["/health", "/readiness"],
        env_var_name="ENABLE_METRICS",
        inprogress_name="autogpt_http_requests_inprogress",
        inprogress_labels=True,
    )

    # Add default HTTP metrics
    instrumentator.add(
        metrics.default(
            metric_namespace="autogpt",
            metric_subsystem=service_name.replace("-", "_"),
        )
    )

    # Add request size metrics
    instrumentator.add(
        metrics.request_size(
            metric_namespace="autogpt",
            metric_subsystem=service_name.replace("-", "_"),
        )
    )

    # Add response size metrics
    instrumentator.add(
        metrics.response_size(
            metric_namespace="autogpt",
            metric_subsystem=service_name.replace("-", "_"),
        )
    )

    # Add latency metrics with custom buckets for better granularity
    instrumentator.add(
        metrics.latency(
            metric_namespace="autogpt",
            metric_subsystem=service_name.replace("-", "_"),
            buckets=[0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60],
        )
    )

    # Add combined metrics (requests by method and status)
    instrumentator.add(
        metrics.combined_size(
            metric_namespace="autogpt",
            metric_subsystem=service_name.replace("-", "_"),
        )
    )

    # Instrument the app
    instrumentator.instrument(app)

    # Expose metrics endpoint if requested
    if expose_endpoint:
        instrumentator.expose(
            app,
            endpoint=endpoint,
            include_in_schema=include_in_schema,
            tags=["monitoring"] if include_in_schema else None,
        )
        logger.info(f"Metrics endpoint exposed at {endpoint} for {service_name}")

    return instrumentator

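For reference, a minimal usage sketch of the helper above; the service name is a placeholder, but the call shape matches how the REST, external, and WebSocket apps in this diff wire it up.

    # Sketch only: instrument a FastAPI app with the helper defined above.
    from fastapi import FastAPI

    app = FastAPI()
    instrument_fastapi(
        app,
        service_name="my-service",   # placeholder; real services use "rest-api", "external-api", etc.
        expose_endpoint=True,        # serves Prometheus text format at the endpoint below
        endpoint="/metrics",
    )
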
def record_graph_execution(graph_id: str, status: str, user_id: str):
    """Record a graph execution event.

    Args:
        graph_id: Graph identifier (kept for future sampling/debugging)
        status: Execution status (success/error/validation_error)
        user_id: User identifier (kept for future sampling/debugging)
    """
    # Track overall executions without high-cardinality labels
    GRAPH_EXECUTIONS.labels(status=status).inc()

    # Optionally track per-user executions (implement sampling if needed)
    # For now, just track status to avoid cardinality explosion
    GRAPH_EXECUTIONS_BY_USER.labels(status=status).inc()


def record_block_execution(block_type: str, status: str, duration: float):
    """Record a block execution event with duration."""
    BLOCK_EXECUTIONS.labels(block_type=block_type, status=status).inc()
    BLOCK_DURATION.labels(block_type=block_type).observe(duration)


def update_websocket_connections(user_id: str, delta: int):
    """Update the number of active WebSocket connections.

    Args:
        user_id: User identifier (kept for future sampling/debugging)
        delta: Change in connection count (+1 for connect, -1 for disconnect)
    """
    # Track total connections without user_id to prevent cardinality explosion
    if delta > 0:
        WEBSOCKET_CONNECTIONS.inc(delta)
    else:
        WEBSOCKET_CONNECTIONS.dec(abs(delta))


def record_database_query(operation: str, table: str, duration: float):
    """Record a database query with duration."""
    DATABASE_QUERIES.labels(operation=operation, table=table).observe(duration)


def record_rabbitmq_message(queue: str, status: str):
    """Record a RabbitMQ message event."""
    RABBITMQ_MESSAGES.labels(queue=queue, status=status).inc()


def record_authentication_attempt(method: str, status: str):
    """Record an authentication attempt."""
    AUTHENTICATION_ATTEMPTS.labels(method=method, status=status).inc()


def record_api_key_usage(provider: str, block_type: str, status: str):
    """Record API key usage by provider and block."""
    API_KEY_USAGE.labels(provider=provider, block_type=block_type, status=status).inc()


def record_rate_limit_hit(endpoint: str, user_id: str):
    """Record a rate limit hit.

    Args:
        endpoint: API endpoint that was rate limited
        user_id: User identifier (kept for future sampling/debugging)
    """
    RATE_LIMIT_HITS.labels(endpoint=endpoint).inc()


def record_graph_operation(operation: str, status: str):
    """Record a graph operation (create, update, delete, execute, etc.)."""
    GRAPH_OPERATIONS.labels(operation=operation, status=status).inc()


def record_user_operation(operation: str, status: str):
    """Record a user operation (login, register, etc.)."""
    USER_OPERATIONS.labels(operation=operation, status=status).inc()
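A rough sketch of how the recording helpers above are meant to be called from a handler; the block name is a placeholder, and the timing pattern mirrors the block-execution route further down in this diff.

    # Sketch only: time a unit of work and record success/failure.
    import time

    start = time.time()
    try:
        run_block()  # placeholder for the actual work
        record_block_execution(block_type="ExampleBlock", status="success", duration=time.time() - start)
    except Exception:
        record_block_execution(block_type="ExampleBlock", status="error", duration=time.time() - start)
        raise
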
@@ -6,10 +6,10 @@ import logging
import threading
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type

from pydantic import BaseModel
from pydantic import BaseModel, SecretStr

from backend.blocks.basic import Block
from backend.data.model import Credentials
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.oauth.base import BaseOAuthHandler
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks._base import BaseWebhooksManager
@@ -17,8 +17,6 @@ from backend.integrations.webhooks._base import BaseWebhooksManager
if TYPE_CHECKING:
    from backend.sdk.provider import Provider

logger = logging.getLogger(__name__)


class SDKOAuthCredentials(BaseModel):
    """OAuth credentials configuration for SDK providers."""
@@ -104,8 +102,21 @@ class AutoRegistry:
        """Register an environment variable as an API key for a provider."""
        with cls._lock:
            cls._api_key_mappings[provider] = env_var_name
            # Note: The credential itself is created by ProviderBuilder.with_api_key()
            # We only store the mapping here to avoid duplication

            # Dynamically check if the env var exists and create credential
            import os

            api_key = os.getenv(env_var_name)
            if api_key:
                credential = APIKeyCredentials(
                    id=f"{provider}-default",
                    provider=provider,
                    api_key=SecretStr(api_key),
                    title=f"Default {provider} credentials",
                )
                # Check if credential already exists to avoid duplicates
                if not any(c.id == credential.id for c in cls._default_credentials):
                    cls._default_credentials.append(credential)

    @classmethod
    def get_all_credentials(cls) -> List[Credentials]:
@@ -199,43 +210,3 @@ class AutoRegistry:
            webhooks.load_webhook_managers = patched_load
        except Exception as e:
            logging.warning(f"Failed to patch webhook managers: {e}")

        # Patch credentials store to include SDK-registered credentials
        try:
            import sys
            from typing import Any

            # Get the module from sys.modules to respect mocking
            if "backend.integrations.credentials_store" in sys.modules:
                creds_store: Any = sys.modules["backend.integrations.credentials_store"]
            else:
                import backend.integrations.credentials_store

                creds_store: Any = backend.integrations.credentials_store

            if hasattr(creds_store, "IntegrationCredentialsStore"):
                store_class = creds_store.IntegrationCredentialsStore
                if hasattr(store_class, "get_all_creds"):
                    original_get_all_creds = store_class.get_all_creds

                    async def patched_get_all_creds(self, user_id: str):
                        # Get original credentials
                        original_creds = await original_get_all_creds(self, user_id)

                        # Add SDK-registered credentials
                        sdk_creds = cls.get_all_credentials()

                        # Combine credentials, avoiding duplicates by ID
                        existing_ids = {c.id for c in original_creds}
                        for cred in sdk_creds:
                            if cred.id not in existing_ids:
                                original_creds.append(cred)

                        return original_creds

                    store_class.get_all_creds = patched_get_all_creds
                    logger.info(
                        "Successfully patched IntegrationCredentialsStore.get_all_creds"
                    )
        except Exception as e:
            logging.warning(f"Failed to patch credentials store: {e}")
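A hedged sketch of what the env-var-backed default credential above amounts to; the provider name and environment variable are hypothetical, and the public entry point is ProviderBuilder.with_api_key() as noted in the comment in the hunk.

    # Hypothetical example: if EXAMPLE_API_KEY is set, a default credential like this is created.
    import os
    from pydantic import SecretStr

    api_key = os.getenv("EXAMPLE_API_KEY")  # hypothetical env var name
    if api_key:
        credential = APIKeyCredentials(
            id="example-default",            # hypothetical provider "example"
            provider="example",
            api_key=SecretStr(api_key),
            title="Default example credentials",
        )
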
@@ -1,6 +1,5 @@
from fastapi import FastAPI

from backend.monitoring.instrumentation import instrument_fastapi
from backend.server.middleware.security import SecurityHeadersMiddleware

from .routes.v1 import v1_router
@@ -14,12 +13,3 @@ external_app = FastAPI(

external_app.add_middleware(SecurityHeadersMiddleware)
external_app.include_router(v1_router, prefix="/v1")

# Add Prometheus instrumentation
instrument_fastapi(
    external_app,
    service_name="external-api",
    expose_endpoint=True,
    endpoint="/metrics",
    include_in_schema=True,
)

@@ -81,10 +81,6 @@ class SecurityHeadersMiddleware(BaseHTTPMiddleware):
        response.headers["X-XSS-Protection"] = "1; mode=block"
        response.headers["Referrer-Policy"] = "strict-origin-when-cross-origin"

        # Add noindex header for shared execution pages
        if "/public/shared" in request.url.path:
            response.headers["X-Robots-Tag"] = "noindex, nofollow"

        # Default: Disable caching for all endpoints
        # Only allow caching for explicitly permitted paths
        if not self.is_cacheable_path(request.url.path):

@@ -18,7 +18,6 @@ import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.integrations.webhooks.utils
import backend.server.routers.postmark.postmark
import backend.server.routers.v1
import backend.server.v2.admin.credit_admin_routes
@@ -37,7 +36,6 @@ import backend.util.settings
from backend.blocks.llm import LlmModel
from backend.data.model import Credentials
from backend.integrations.providers import ProviderName
from backend.monitoring.instrumentation import instrument_fastapi
from backend.server.external.api import external_app
from backend.server.middleware.security import SecurityHeadersMiddleware
from backend.util import json
@@ -80,8 +78,6 @@ async def lifespan_context(app: fastapi.FastAPI):
    await backend.data.user.migrate_and_encrypt_user_integrations()
    await backend.data.graph.fix_llm_provider_credentials()
    await backend.data.graph.migrate_llm_models(LlmModel.GPT4O)
    await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()

    with launch_darkly_context():
        yield

@@ -143,16 +139,6 @@ app.add_middleware(SecurityHeadersMiddleware)
# Add 401 responses to authenticated endpoints in OpenAPI spec
add_auth_responses_to_openapi(app)

# Add Prometheus instrumentation
instrument_fastapi(
    app,
    service_name="rest-api",
    expose_endpoint=True,
    endpoint="/metrics",
    include_in_schema=settings.config.app_env
    == backend.util.settings.AppEnvironment.LOCAL,
)


def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
    def handler(request: fastapi.Request, exc: Exception):
@@ -266,13 +252,19 @@ async def health():

class AgentServer(backend.util.service.AppProcess):
    def run(self):
        server_app = starlette.middleware.cors.CORSMiddleware(
            app=app,
            allow_origins=settings.config.backend_cors_allow_origins,
            allow_credentials=True,
            allow_methods=["*"],  # Allows all methods
            allow_headers=["*"],  # Allows all headers
        )

        if settings.config.enable_cors_all_origins:
            server_app = starlette.middleware.cors.CORSMiddleware(
                app=app,
                allow_origins=settings.config.backend_cors_allow_origins,
                allow_credentials=True,
                allow_methods=["*"],  # Allows all methods
                allow_headers=["*"],  # Allows all headers
            )
        else:
            logger.info("CORS is disabled")
            server_app = app

        uvicorn.run(
            server_app,
            host=backend.util.settings.Config().agent_api_host,

@@ -1,10 +1,8 @@
import asyncio
import base64
import logging
import time
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from datetime import datetime
from typing import Annotated, Any, Sequence

import pydantic
@@ -65,11 +63,6 @@ from backend.integrations.webhooks.graph_lifecycle_hooks import (
    on_graph_activate,
    on_graph_deactivate,
)
from backend.monitoring.instrumentation import (
    record_block_execution,
    record_graph_execution,
    record_graph_operation,
)
from backend.server.model import (
    CreateAPIKeyRequest,
    CreateAPIKeyResponse,
@@ -86,6 +79,7 @@ from backend.util.cloud_storage import get_cloud_storage_handler
from backend.util.exceptions import GraphValidationError, NotFoundError
from backend.util.settings import Settings
from backend.util.timezone_utils import (
    convert_cron_to_utc,
    convert_utc_time_to_user_timezone,
    get_user_timezone_or_utc,
)
@@ -103,7 +97,6 @@ def _create_file_size_error(size_bytes: int, max_size_mb: int) -> HTTPException:
settings = Settings()
logger = logging.getLogger(__name__)


_user_credit_model = get_user_credit_model()

# Define the API routes
@@ -287,26 +280,10 @@ async def execute_graph_block(block_id: str, data: BlockInput) -> CompletedBlock
    if not obj:
        raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")

    start_time = time.time()
    try:
        output = defaultdict(list)
        async for name, data in obj.execute(data):
            output[name].append(data)

        # Record successful block execution with duration
        duration = time.time() - start_time
        block_type = obj.__class__.__name__
        record_block_execution(
            block_type=block_type, status="success", duration=duration
        )

        return output
    except Exception:
        # Record failed block execution
        duration = time.time() - start_time
        block_type = obj.__class__.__name__
        record_block_execution(block_type=block_type, status="error", duration=duration)
        raise
    output = defaultdict(list)
    async for name, data in obj.execute(data):
        output[name].append(data)
    return output


@v1_router.post(
@@ -802,7 +779,7 @@ async def execute_graph(
    )

    try:
        result = await execution_utils.add_graph_execution(
        return await execution_utils.add_graph_execution(
            graph_id=graph_id,
            user_id=user_id,
            inputs=inputs,
@@ -810,16 +787,7 @@ async def execute_graph(
            graph_version=graph_version,
            graph_credentials_inputs=credentials_inputs,
        )
        # Record successful graph execution
        record_graph_execution(graph_id=graph_id, status="success", user_id=user_id)
        record_graph_operation(operation="execute", status="success")
        return result
    except GraphValidationError as e:
        # Record failed graph execution
        record_graph_execution(
            graph_id=graph_id, status="validation_error", user_id=user_id
        )
        record_graph_operation(operation="execute", status="validation_error")
        # Return structured validation errors that the frontend can parse
        raise HTTPException(
            status_code=400,
@@ -830,11 +798,6 @@ async def execute_graph(
                "node_errors": e.node_errors,
            },
        )
    except Exception:
        # Record any other failures
        record_graph_execution(graph_id=graph_id, status="error", user_id=user_id)
        record_graph_operation(operation="execute", status="error")
        raise


@v1_router.post(
@@ -959,99 +922,6 @@ async def delete_graph_execution(
    )


class ShareRequest(pydantic.BaseModel):
    """Optional request body for share endpoint."""

    pass  # Empty body is fine


class ShareResponse(pydantic.BaseModel):
    """Response from share endpoints."""

    share_url: str
    share_token: str


@v1_router.post(
    "/graphs/{graph_id}/executions/{graph_exec_id}/share",
    dependencies=[Security(requires_user)],
)
async def enable_execution_sharing(
    graph_id: Annotated[str, Path],
    graph_exec_id: Annotated[str, Path],
    user_id: Annotated[str, Security(get_user_id)],
    _body: ShareRequest = Body(default=ShareRequest()),
) -> ShareResponse:
    """Enable sharing for a graph execution."""
    # Verify the execution belongs to the user
    execution = await execution_db.get_graph_execution(
        user_id=user_id, execution_id=graph_exec_id
    )
    if not execution:
        raise HTTPException(status_code=404, detail="Execution not found")

    # Generate a unique share token
    share_token = str(uuid.uuid4())

    # Update the execution with share info
    await execution_db.update_graph_execution_share_status(
        execution_id=graph_exec_id,
        user_id=user_id,
        is_shared=True,
        share_token=share_token,
        shared_at=datetime.now(timezone.utc),
    )

    # Return the share URL
    frontend_url = Settings().config.frontend_base_url or "http://localhost:3000"
    share_url = f"{frontend_url}/share/{share_token}"

    return ShareResponse(share_url=share_url, share_token=share_token)


@v1_router.delete(
    "/graphs/{graph_id}/executions/{graph_exec_id}/share",
    status_code=HTTP_204_NO_CONTENT,
    dependencies=[Security(requires_user)],
)
async def disable_execution_sharing(
    graph_id: Annotated[str, Path],
    graph_exec_id: Annotated[str, Path],
    user_id: Annotated[str, Security(get_user_id)],
) -> None:
    """Disable sharing for a graph execution."""
    # Verify the execution belongs to the user
    execution = await execution_db.get_graph_execution(
        user_id=user_id, execution_id=graph_exec_id
    )
    if not execution:
        raise HTTPException(status_code=404, detail="Execution not found")

    # Remove share info
    await execution_db.update_graph_execution_share_status(
        execution_id=graph_exec_id,
        user_id=user_id,
        is_shared=False,
        share_token=None,
        shared_at=None,
    )


@v1_router.get("/public/shared/{share_token}")
async def get_shared_execution(
    share_token: Annotated[
        str,
        Path(regex=r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"),
    ],
) -> execution_db.SharedExecutionResponse:
    """Get a shared graph execution by share token (no auth required)."""
    execution = await execution_db.get_graph_execution_by_share_token(share_token)
    if not execution:
        raise HTTPException(status_code=404, detail="Shared execution not found")

    return execution

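A hedged sketch of exercising the sharing endpoints above over HTTP; only the route paths come from the handlers, while the base URL, IDs, and auth token are placeholders.

    # Sketch only: enable, fetch, and revoke sharing for an execution.
    import httpx

    BASE = "http://localhost:8000"  # assumed base URL
    AUTH = {"Authorization": "Bearer TOKEN"}  # placeholder token

    r = httpx.post(f"{BASE}/graphs/GRAPH_ID/executions/EXEC_ID/share", headers=AUTH)
    token = r.json()["share_token"]

    # The shared view is public and needs no auth:
    shared = httpx.get(f"{BASE}/public/shared/{token}")

    # Sharing can be revoked again:
    httpx.delete(f"{BASE}/graphs/GRAPH_ID/executions/EXEC_ID/share", headers=AUTH)
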
########################################################
##################### Schedules ########################
########################################################
@@ -1063,10 +933,6 @@ class ScheduleCreationRequest(pydantic.BaseModel):
    cron: str
    inputs: dict[str, Any]
    credentials: dict[str, CredentialsMetaInput] = pydantic.Field(default_factory=dict)
    timezone: Optional[str] = pydantic.Field(
        default=None,
        description="User's timezone for scheduling (e.g., 'America/New_York'). If not provided, will use user's saved timezone or UTC.",
    )


@v1_router.post(
@@ -1091,22 +957,26 @@ async def create_graph_execution_schedule(
            detail=f"Graph #{graph_id} v{schedule_params.graph_version} not found.",
        )

    # Use timezone from request if provided, otherwise fetch from user profile
    if schedule_params.timezone:
        user_timezone = schedule_params.timezone
    else:
        user = await get_user_by_id(user_id)
        user_timezone = get_user_timezone_or_utc(user.timezone if user else None)
    user = await get_user_by_id(user_id)
    user_timezone = get_user_timezone_or_utc(user.timezone if user else None)

    # Convert cron expression from user timezone to UTC
    try:
        utc_cron = convert_cron_to_utc(schedule_params.cron, user_timezone)
    except ValueError as e:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid cron expression for timezone {user_timezone}: {e}",
        )

    result = await get_scheduler_client().add_execution_schedule(
        user_id=user_id,
        graph_id=graph_id,
        graph_version=graph.version,
        name=schedule_params.name,
        cron=schedule_params.cron,
        cron=utc_cron,  # Send UTC cron to scheduler
        input_data=schedule_params.inputs,
        input_credentials=schedule_params.credentials,
        user_timezone=user_timezone,
    )

    # Convert the next_run_time back to user timezone for display
@@ -1128,11 +998,24 @@ async def list_graph_execution_schedules(
    user_id: Annotated[str, Security(get_user_id)],
    graph_id: str = Path(),
) -> list[scheduler.GraphExecutionJobInfo]:
    return await get_scheduler_client().get_execution_schedules(
    schedules = await get_scheduler_client().get_execution_schedules(
        user_id=user_id,
        graph_id=graph_id,
    )

    # Get user timezone for conversion
    user = await get_user_by_id(user_id)
    user_timezone = get_user_timezone_or_utc(user.timezone if user else None)

    # Convert next_run_time to user timezone for display
    for schedule in schedules:
        if schedule.next_run_time:
            schedule.next_run_time = convert_utc_time_to_user_timezone(
                schedule.next_run_time, user_timezone
            )

    return schedules


@v1_router.get(
    path="/schedules",
@@ -1143,7 +1026,20 @@ async def list_graph_execution_schedules(
async def list_all_graphs_execution_schedules(
    user_id: Annotated[str, Security(get_user_id)],
) -> list[scheduler.GraphExecutionJobInfo]:
    return await get_scheduler_client().get_execution_schedules(user_id=user_id)
    schedules = await get_scheduler_client().get_execution_schedules(user_id=user_id)

    # Get user timezone for conversion
    user = await get_user_by_id(user_id)
    user_timezone = get_user_timezone_or_utc(user.timezone if user else None)

    # Convert UTC next_run_time to user timezone for display
    for schedule in schedules:
        if schedule.next_run_time:
            schedule.next_run_time = convert_utc_time_to_user_timezone(
                schedule.next_run_time, user_timezone
            )

    return schedules


@v1_router.delete(

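The schedule routes above store cron expressions in UTC and convert next-run times back to the user's timezone for display. A small sketch of the conversion helper, assuming the import shown earlier in this diff; the timezone value is illustrative and the converted result is not asserted here.

    # Sketch only: convert a user-local cron expression to UTC before scheduling.
    from backend.util.timezone_utils import convert_cron_to_utc

    user_timezone = "America/New_York"               # illustrative value
    utc_cron = convert_cron_to_utc("0 9 * * *", user_timezone)  # 09:00 local, expressed as a UTC cron
    print(utc_cron)
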
@@ -1,5 +1,4 @@
import json
from datetime import datetime
from io import BytesIO
from unittest.mock import AsyncMock, Mock, patch

@@ -266,7 +265,6 @@ def test_get_graphs(
    name="Test Graph",
    description="A test graph",
    user_id=test_user_id,
    created_at=datetime(2025, 9, 4, 13, 37),
)

mocker.patch(
@@ -301,7 +299,6 @@ def test_get_graph(
    name="Test Graph",
    description="A test graph",
    user_id=test_user_id,
    created_at=datetime(2025, 9, 4, 13, 37),
)

mocker.patch(
@@ -351,7 +348,6 @@ def test_delete_graph(
    name="Test Graph",
    description="A test graph",
    user_id=test_user_id,
    created_at=datetime(2025, 9, 4, 13, 37),
)

mocker.patch(

@@ -144,92 +144,6 @@ async def list_library_agents(
        raise store_exceptions.DatabaseError("Failed to fetch library agents") from e


async def list_favorite_library_agents(
    user_id: str,
    page: int = 1,
    page_size: int = 50,
) -> library_model.LibraryAgentResponse:
    """
    Retrieves a paginated list of favorite LibraryAgent records for a given user.

    Args:
        user_id: The ID of the user whose favorite LibraryAgents we want to retrieve.
        page: Current page (1-indexed).
        page_size: Number of items per page.

    Returns:
        A LibraryAgentResponse containing the list of favorite agents and pagination details.

    Raises:
        DatabaseError: If there is an issue fetching from Prisma.
    """
    logger.debug(
        f"Fetching favorite library agents for user_id={user_id}, "
        f"page={page}, page_size={page_size}"
    )

    if page < 1 or page_size < 1:
        logger.warning(f"Invalid pagination: page={page}, page_size={page_size}")
        raise store_exceptions.DatabaseError("Invalid pagination input")

    where_clause: prisma.types.LibraryAgentWhereInput = {
        "userId": user_id,
        "isDeleted": False,
        "isArchived": False,
        "isFavorite": True,  # Only fetch favorites
    }

    # Sort favorites by updated date descending
    order_by: prisma.types.LibraryAgentOrderByInput = {"updatedAt": "desc"}

    try:
        library_agents = await prisma.models.LibraryAgent.prisma().find_many(
            where=where_clause,
            include=library_agent_include(user_id),
            order=order_by,
            skip=(page - 1) * page_size,
            take=page_size,
        )
        agent_count = await prisma.models.LibraryAgent.prisma().count(
            where=where_clause
        )

        logger.debug(
            f"Retrieved {len(library_agents)} favorite library agents for user #{user_id}"
        )

        # Only pass valid agents to the response
        valid_library_agents: list[library_model.LibraryAgent] = []

        for agent in library_agents:
            try:
                library_agent = library_model.LibraryAgent.from_db(agent)
                valid_library_agents.append(library_agent)
            except Exception as e:
                # Skip this agent if there was an error
                logger.error(
                    f"Error parsing LibraryAgent #{agent.id} from DB item: {e}"
                )
                continue

        # Return the response with only valid agents
        return library_model.LibraryAgentResponse(
            agents=valid_library_agents,
            pagination=Pagination(
                total_items=agent_count,
                total_pages=(agent_count + page_size - 1) // page_size,
                current_page=page,
                page_size=page_size,
            ),
        )

    except prisma.errors.PrismaError as e:
        logger.error(f"Database error fetching favorite library agents: {e}")
        raise store_exceptions.DatabaseError(
            "Failed to fetch favorite library agents"
        ) from e


async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent:
    """
    Get a specific agent from the user's library.
@@ -795,7 +709,10 @@ async def create_preset(
    )
    for name, data in {
        **preset.inputs,
        **preset.credentials,
        **{
            key: creds_meta.model_dump(exclude_none=True)
            for key, creds_meta in preset.credentials.items()
        },
    }.items()
]
},

@@ -43,7 +43,6 @@ class LibraryAgent(pydantic.BaseModel):

    name: str
    description: str
    instructions: str | None = None

    input_schema: dict[str, Any]  # Should be BlockIOObjectSubSchema in frontend
    output_schema: dict[str, Any]
@@ -65,9 +64,6 @@ class LibraryAgent(pydantic.BaseModel):
    # Indicates if this agent is the latest version
    is_latest_version: bool

    # Whether the agent is marked as favorite by the user
    is_favorite: bool

    # Recommended schedule cron (from marketplace agents)
    recommended_schedule_cron: str | None = None

@@ -127,7 +123,6 @@ class LibraryAgent(pydantic.BaseModel):
    updated_at=updated_at,
    name=graph.name,
    description=graph.description,
    instructions=graph.instructions,
    input_schema=graph.input_schema,
    output_schema=graph.output_schema,
    credentials_input_schema=(
@@ -138,7 +133,6 @@ class LibraryAgent(pydantic.BaseModel):
    new_output=new_output,
    can_access_graph=can_access_graph,
    is_latest_version=is_latest_version,
    is_favorite=agent.isFavorite,
    recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron,
)

@@ -263,7 +257,6 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):

    id: str
    user_id: str
    created_at: datetime.datetime
    updated_at: datetime.datetime

    webhook: "Webhook | None"
@@ -293,7 +286,6 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):
    return cls(
        id=preset.id,
        user_id=preset.userId,
        created_at=preset.createdAt,
        updated_at=preset.updatedAt,
        graph_id=preset.agentGraphId,
        graph_version=preset.agentGraphVersion,

@@ -79,54 +79,6 @@ async def list_library_agents(
        ) from e


@router.get(
    "/favorites",
    summary="List Favorite Library Agents",
    responses={
        500: {"description": "Server error", "content": {"application/json": {}}},
    },
)
async def list_favorite_library_agents(
    user_id: str = Security(autogpt_auth_lib.get_user_id),
    page: int = Query(
        1,
        ge=1,
        description="Page number to retrieve (must be >= 1)",
    ),
    page_size: int = Query(
        15,
        ge=1,
        description="Number of agents per page (must be >= 1)",
    ),
) -> library_model.LibraryAgentResponse:
    """
    Get all favorite agents in the user's library.

    Args:
        user_id: ID of the authenticated user.
        page: Page number to retrieve.
        page_size: Number of agents per page.

    Returns:
        A LibraryAgentResponse containing favorite agents and pagination metadata.

    Raises:
        HTTPException: If a server/database error occurs.
    """
    try:
        return await library_db.list_favorite_library_agents(
            user_id=user_id,
            page=page,
            page_size=page_size,
        )
    except Exception as e:
        logger.error(f"Could not list favorite library agents for user #{user_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e),
        ) from e


@router.get("/{library_agent_id}", summary="Get Library Agent")
async def get_library_agent(
    library_agent_id: str,

@@ -54,7 +54,6 @@ async def test_get_library_agents_success(
    new_output=False,
    can_access_graph=True,
    is_latest_version=True,
    is_favorite=False,
    updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
library_model.LibraryAgent(
@@ -75,7 +74,6 @@ async def test_get_library_agents_success(
    new_output=False,
    can_access_graph=False,
    is_latest_version=True,
    is_favorite=False,
    updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
],
@@ -123,76 +121,6 @@ def test_get_library_agents_error(mocker: pytest_mock.MockFixture, test_user_id:
    )


@pytest.mark.asyncio
async def test_get_favorite_library_agents_success(
    mocker: pytest_mock.MockFixture,
    test_user_id: str,
) -> None:
    mocked_value = library_model.LibraryAgentResponse(
        agents=[
            library_model.LibraryAgent(
                id="test-agent-1",
                graph_id="test-agent-1",
                graph_version=1,
                name="Favorite Agent 1",
                description="Test Favorite Description 1",
                image_url=None,
                creator_name="Test Creator",
                creator_image_url="",
                input_schema={"type": "object", "properties": {}},
                output_schema={"type": "object", "properties": {}},
                credentials_input_schema={"type": "object", "properties": {}},
                has_external_trigger=False,
                status=library_model.LibraryAgentStatus.COMPLETED,
                recommended_schedule_cron=None,
                new_output=False,
                can_access_graph=True,
                is_latest_version=True,
                is_favorite=True,
                updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
            ),
        ],
        pagination=Pagination(
            total_items=1, total_pages=1, current_page=1, page_size=15
        ),
    )
    mock_db_call = mocker.patch(
        "backend.server.v2.library.db.list_favorite_library_agents"
    )
    mock_db_call.return_value = mocked_value

    response = client.get("/agents/favorites")
    assert response.status_code == 200

    data = library_model.LibraryAgentResponse.model_validate(response.json())
    assert len(data.agents) == 1
    assert data.agents[0].is_favorite is True
    assert data.agents[0].name == "Favorite Agent 1"

    mock_db_call.assert_called_once_with(
        user_id=test_user_id,
        page=1,
        page_size=15,
    )


def test_get_favorite_library_agents_error(
    mocker: pytest_mock.MockFixture, test_user_id: str
):
    mock_db_call = mocker.patch(
        "backend.server.v2.library.db.list_favorite_library_agents"
    )
    mock_db_call.side_effect = Exception("Test error")

    response = client.get("/agents/favorites")
    assert response.status_code == 500
    mock_db_call.assert_called_once_with(
        user_id=test_user_id,
        page=1,
        page_size=15,
    )


def test_add_agent_to_library_success(
    mocker: pytest_mock.MockFixture, test_user_id: str
):
@@ -213,7 +141,6 @@ def test_add_agent_to_library_success(
    new_output=False,
    can_access_graph=True,
    is_latest_version=True,
    is_favorite=False,
    updated_at=FIXED_NOW,
)

@@ -183,29 +183,6 @@ async def get_store_agent_details(
        store_listing.hasApprovedVersion if store_listing else False
    )

    if active_version_id:
        agent_by_active = await prisma.models.StoreAgent.prisma().find_first(
            where={"storeListingVersionId": active_version_id}
        )
        if agent_by_active:
            agent = agent_by_active
    elif store_listing:
        latest_approved = (
            await prisma.models.StoreListingVersion.prisma().find_first(
                where={
                    "storeListingId": store_listing.id,
                    "submissionStatus": prisma.enums.SubmissionStatus.APPROVED,
                },
                order=[{"version": "desc"}],
            )
        )
        if latest_approved:
            agent_latest = await prisma.models.StoreAgent.prisma().find_first(
                where={"storeListingVersionId": latest_approved.id}
            )
            if agent_latest:
                agent = agent_latest

    if store_listing and store_listing.ActiveVersion:
        recommended_schedule_cron = (
            store_listing.ActiveVersion.recommendedScheduleCron
@@ -499,7 +476,6 @@ async def get_store_submissions(
    sub_heading=sub.sub_heading,
    slug=sub.slug,
    description=sub.description,
    instructions=getattr(sub, "instructions", None),
    image_urls=sub.image_urls or [],
    date_submitted=sub.date_submitted or datetime.now(tz=timezone.utc),
    status=sub.status,
@@ -591,7 +567,6 @@ async def create_store_submission(
    video_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    instructions: str | None = None,
    sub_heading: str = "",
    categories: list[str] = [],
    changes_summary: str | None = "Initial Submission",
@@ -663,7 +638,6 @@ async def create_store_submission(
    video_url=video_url,
    image_urls=image_urls,
    description=description,
    instructions=instructions,
    sub_heading=sub_heading,
    categories=categories,
    changes_summary=changes_summary,
@@ -685,7 +659,6 @@ async def create_store_submission(
    videoUrl=video_url,
    imageUrls=image_urls,
    description=description,
    instructions=instructions,
    categories=categories,
    subHeading=sub_heading,
    submissionStatus=prisma.enums.SubmissionStatus.PENDING,
@@ -716,7 +689,6 @@ async def create_store_submission(
    slug=slug,
    sub_heading=sub_heading,
    description=description,
    instructions=instructions,
    image_urls=image_urls,
    date_submitted=listing.createdAt,
    status=prisma.enums.SubmissionStatus.PENDING,
@@ -749,7 +721,6 @@ async def edit_store_submission(
    categories: list[str] = [],
    changes_summary: str | None = "Update submission",
    recommended_schedule_cron: str | None = None,
    instructions: str | None = None,
) -> backend.server.v2.store.model.StoreSubmission:
    """
    Edit an existing store listing submission.
@@ -830,7 +801,6 @@ async def edit_store_submission(
    categories=categories,
    changes_summary=changes_summary,
    recommended_schedule_cron=recommended_schedule_cron,
    instructions=instructions,
)

# For PENDING submissions, we can update the existing version
@@ -847,7 +817,6 @@ async def edit_store_submission(
    subHeading=sub_heading,
    changesSummary=changes_summary,
    recommendedScheduleCron=recommended_schedule_cron,
    instructions=instructions,
),
)

@@ -866,7 +835,6 @@ async def edit_store_submission(
    sub_heading=sub_heading,
    slug=current_version.StoreListing.slug,
    description=description,
    instructions=instructions,
    image_urls=image_urls,
    date_submitted=updated_version.submittedAt or updated_version.createdAt,
    status=updated_version.submissionStatus,
@@ -908,7 +876,6 @@ async def create_store_version(
    video_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    instructions: str | None = None,
    sub_heading: str = "",
    categories: list[str] = [],
    changes_summary: str | None = "Initial submission",
@@ -977,7 +944,6 @@ async def create_store_version(
    videoUrl=video_url,
    imageUrls=image_urls,
    description=description,
    instructions=instructions,
    categories=categories,
    subHeading=sub_heading,
    submissionStatus=prisma.enums.SubmissionStatus.PENDING,
@@ -999,7 +965,6 @@ async def create_store_version(
    slug=listing.slug,
    sub_heading=sub_heading,
    description=description,
    instructions=instructions,
    image_urls=image_urls,
    date_submitted=datetime.now(),
    status=prisma.enums.SubmissionStatus.PENDING,
@@ -1176,20 +1141,7 @@ async def get_my_agents(
    try:
        search_filter: prisma.types.LibraryAgentWhereInput = {
            "userId": user_id,
            "AgentGraph": {
                "is": {
                    "StoreListings": {
                        "none": {
                            "isDeleted": False,
                            "Versions": {
                                "some": {
                                    "isAvailable": True,
                                }
                            },
                        }
                    }
                }
            },
            "AgentGraph": {"is": {"StoreListings": {"none": {"isDeleted": False}}}},
            "isArchived": False,
            "isDeleted": False,
        }
@@ -1427,7 +1379,6 @@ async def review_store_submission(
    "name": store_listing_version.name,
    "description": store_listing_version.description,
    "recommendedScheduleCron": store_listing_version.recommendedScheduleCron,
    "instructions": store_listing_version.instructions,
},
)

@@ -1593,7 +1544,6 @@ async def review_store_submission(
    else ""
    ),
    description=submission.description,
    instructions=submission.instructions,
    image_urls=submission.imageUrls or [],
    date_submitted=submission.submittedAt or submission.createdAt,
    status=submission.submissionStatus,
@@ -1729,7 +1679,6 @@ async def get_admin_listings_with_versions(
    sub_heading=version.subHeading,
    slug=listing.slug,
    description=version.description,
    instructions=version.instructions,
    image_urls=version.imageUrls or [],
    date_submitted=version.submittedAt or version.createdAt,
    status=version.submissionStatus,

@@ -86,27 +86,6 @@ async def test_get_store_agent_details(mocker):
        is_available=False,
    )

    # Mock active version agent (what we want to return for active version)
    mock_active_agent = prisma.models.StoreAgent(
        listing_id="test-id",
        storeListingVersionId="active-version-id",
        slug="test-agent",
        agent_name="Test Agent Active",
        agent_video="active_video.mp4",
        agent_image=["active_image.jpg"],
        featured=False,
        creator_username="creator",
        creator_avatar="avatar.jpg",
        sub_heading="Test heading active",
        description="Test description active",
        categories=["test"],
        runs=15,
        rating=4.8,
        versions=["1.0", "2.0"],
        updated_at=datetime.now(),
        is_available=True,
    )

    # Create a mock StoreListing result
    mock_store_listing = mocker.MagicMock()
    mock_store_listing.activeVersionId = "active-version-id"
@@ -114,22 +93,9 @@ async def test_get_store_agent_details(mocker):
    mock_store_listing.ActiveVersion = mocker.MagicMock()
    mock_store_listing.ActiveVersion.recommendedScheduleCron = None

    # Mock StoreAgent prisma call - need to handle multiple calls
    # Mock StoreAgent prisma call
    mock_store_agent = mocker.patch("prisma.models.StoreAgent.prisma")

    # Set up side_effect to return different results for different calls
    def mock_find_first_side_effect(*args, **kwargs):
        where_clause = kwargs.get("where", {})
        if "storeListingVersionId" in where_clause:
            # Second call for active version
            return mock_active_agent
        else:
            # First call for initial lookup
            return mock_agent

    mock_store_agent.return_value.find_first = mocker.AsyncMock(
        side_effect=mock_find_first_side_effect
    )
    mock_store_agent.return_value.find_first = mocker.AsyncMock(return_value=mock_agent)

    # Mock Profile prisma call
    mock_profile = mocker.MagicMock()
@@ -139,7 +105,7 @@ async def test_get_store_agent_details(mocker):
        return_value=mock_profile
    )

    # Mock StoreListing prisma call
    # Mock StoreListing prisma call - this is what was missing
    mock_store_listing_db = mocker.patch("prisma.models.StoreListing.prisma")
    mock_store_listing_db.return_value.find_first = mocker.AsyncMock(
        return_value=mock_store_listing
@@ -148,25 +114,16 @@ async def test_get_store_agent_details(mocker):
    # Call function
    result = await db.get_store_agent_details("creator", "test-agent")

    # Verify results - should use active version data
    # Verify results
    assert result.slug == "test-agent"
    assert result.agent_name == "Test Agent Active"  # From active version
    assert result.agent_name == "Test Agent"
    assert result.active_version_id == "active-version-id"
    assert result.has_approved_version is True
    assert (
        result.store_listing_version_id == "active-version-id"
    )  # Should be active version ID

    # Verify mocks called correctly - now expecting 2 calls
    assert mock_store_agent.return_value.find_first.call_count == 2

    # Check the specific calls
    calls = mock_store_agent.return_value.find_first.call_args_list
    assert calls[0] == mocker.call(
    # Verify mocks called correctly
    mock_store_agent.return_value.find_first.assert_called_once_with(
        where={"creator_username": "creator", "slug": "test-agent"}
    )
    assert calls[1] == mocker.call(where={"storeListingVersionId": "active-version-id"})

    mock_store_listing_db.return_value.find_first.assert_called_once()

@@ -49,7 +49,6 @@ class StoreAgentDetails(pydantic.BaseModel):
    creator_avatar: str
    sub_heading: str
    description: str
    instructions: str | None = None
    categories: list[str]
    runs: int
    rating: float
@@ -104,7 +103,6 @@ class StoreSubmission(pydantic.BaseModel):
    sub_heading: str
    slug: str
    description: str
    instructions: str | None = None
    image_urls: list[str]
    date_submitted: datetime.datetime
    status: prisma.enums.SubmissionStatus
@@ -159,7 +157,6 @@ class StoreSubmissionRequest(pydantic.BaseModel):
    video_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    instructions: str | None = None
    categories: list[str] = []
    changes_summary: str | None = None
    recommended_schedule_cron: str | None = None
@@ -171,7 +168,6 @@ class StoreSubmissionEditRequest(pydantic.BaseModel):
    video_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    instructions: str | None = None
    categories: list[str] = []
    changes_summary: str | None = None
    recommended_schedule_cron: str | None = None

@@ -532,7 +532,6 @@ async def create_submission(
    video_url=submission_request.video_url,
    image_urls=submission_request.image_urls,
    description=submission_request.description,
    instructions=submission_request.instructions,
    sub_heading=submission_request.sub_heading,
    categories=submission_request.categories,
    changes_summary=submission_request.changes_summary or "Initial Submission",
@@ -579,7 +578,6 @@ async def edit_submission(
    video_url=submission_request.video_url,
    image_urls=submission_request.image_urls,
    description=submission_request.description,
    instructions=submission_request.instructions,
    sub_heading=submission_request.sub_heading,
    categories=submission_request.categories,
    changes_summary=submission_request.changes_summary,

@@ -11,10 +11,6 @@ from starlette.middleware.cors import CORSMiddleware

from backend.data.execution import AsyncRedisExecutionEventBus
from backend.data.user import DEFAULT_USER_ID
from backend.monitoring.instrumentation import (
    instrument_fastapi,
    update_websocket_connections,
)
from backend.server.conn_manager import ConnectionManager
from backend.server.model import (
    WSMessage,
@@ -42,15 +38,6 @@ docs_url = "/docs" if settings.config.app_env == AppEnvironment.LOCAL else None
app = FastAPI(lifespan=lifespan, docs_url=docs_url)
_connection_manager = None

# Add Prometheus instrumentation
instrument_fastapi(
    app,
    service_name="websocket-server",
    expose_endpoint=True,
    endpoint="/metrics",
    include_in_schema=settings.config.app_env == AppEnvironment.LOCAL,
)


def get_connection_manager():
    global _connection_manager
@@ -229,10 +216,6 @@ async def websocket_router(
    if not user_id:
        return
    await manager.connect_socket(websocket)

    # Track WebSocket connection
    update_websocket_connections(user_id, 1)

    try:
        while True:
            data = await websocket.receive_text()
@@ -303,8 +286,6 @@ async def websocket_router(
    except WebSocketDisconnect:
        manager.disconnect_socket(websocket)
        logger.debug("WebSocket client disconnected")
    finally:
        update_websocket_connections(user_id, -1)


@app.get("/")
@@ -314,14 +295,17 @@ async def health():

class WebsocketServer(AppProcess):
    def run(self):
        logger.info(f"CORS allow origins: {settings.config.backend_cors_allow_origins}")
        server_app = CORSMiddleware(
            app=app,
            allow_origins=settings.config.backend_cors_allow_origins,
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )
        if settings.config.enable_cors_all_origins:
            server_app = CORSMiddleware(
                app=app,
                allow_origins=settings.config.backend_cors_allow_origins,
                allow_credentials=True,
                allow_methods=["*"],
                allow_headers=["*"],
            )
        else:
            logger.info("CORS is disabled")
            server_app = app

        uvicorn.run(
            server_app,

@@ -368,6 +368,11 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
        description="Maximum message size limit for communication with the message bus",
    )

    enable_cors_all_origins: bool = Field(
        default=True,
        description="Whether to enable all CORS origins",
    )

    backend_cors_allow_origins: List[str] = Field(default=["http://localhost:3000"])

    @field_validator("backend_cors_allow_origins")
@@ -479,9 +484,6 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    )

    openai_api_key: str = Field(default="", description="OpenAI API key")
    openai_internal_api_key: str = Field(
        default="", description="OpenAI Internal API key"
    )
    aiml_api_key: str = Field(default="", description="'AI/ML API' key")
    anthropic_api_key: str = Field(default="", description="Anthropic API key")
    groq_api_key: str = Field(default="", description="Groq API key")
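The new enable_cors_all_origins flag above defaults to True, so existing deployments keep the current CORS middleware; when it is False, both the REST and WebSocket servers serve the bare app without the CORS wrapper. A minimal sketch of reading the flag, assuming pydantic-settings' default environment-variable mapping (the variable name is an assumption, not confirmed by this diff):

    # Sketch only: toggling the CORS wrapper via settings.
    # export ENABLE_CORS_ALL_ORIGINS=false   # assumed env var name
    from backend.util.settings import Config

    config = Config()
    if config.enable_cors_all_origins:
        print("CORS middleware will wrap the app")
    else:
        print("CORS is disabled; the bare app is served")
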
|
||||
@@ -1,66 +0,0 @@
|
||||
-- Fixes the refresh function+job introduced in 20250604130249_optimise_store_agent_and_creator_views
|
||||
-- by improving the function to accept a schema parameter and updating the cron job to use it.
|
||||
-- This resolves the issue where pg_cron jobs fail because they run in 'public' schema
|
||||
-- but the materialized views exist in 'platform' schema.
|
||||
|
||||
|
||||
-- Create parameterized refresh function that accepts schema name
|
||||
CREATE OR REPLACE FUNCTION refresh_store_materialized_views()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
DECLARE
|
||||
target_schema text := current_schema(); -- Use the current schema where the function is called
|
||||
BEGIN
|
||||
-- Use CONCURRENTLY for better performance during refresh
|
||||
REFRESH MATERIALIZED VIEW CONCURRENTLY "mv_agent_run_counts";
|
||||
REFRESH MATERIALIZED VIEW CONCURRENTLY "mv_review_stats";
|
||||
RAISE NOTICE 'Materialized views refreshed in schema % at %', target_schema, NOW();
|
||||
EXCEPTION
|
||||
WHEN OTHERS THEN
|
||||
-- Fallback to non-concurrent refresh if concurrent fails
|
||||
REFRESH MATERIALIZED VIEW "mv_agent_run_counts";
|
||||
REFRESH MATERIALIZED VIEW "mv_review_stats";
|
||||
RAISE NOTICE 'Materialized views refreshed (non-concurrent) in schema % at %. Concurrent refresh failed due to: %', target_schema, NOW(), SQLERRM;
|
||||
END;
|
||||
$$;
|
||||
|
||||
-- Initial refresh + test of the function to ensure it works
|
||||
SELECT refresh_store_materialized_views();
|
||||
|
||||
-- Re-create the cron job to use the improved function
|
||||
DO $$
|
||||
DECLARE
|
||||
has_pg_cron BOOLEAN;
|
||||
current_schema_name text := current_schema();
|
||||
old_job_name text;
|
||||
job_name text;
|
||||
BEGIN
|
||||
-- Check if pg_cron extension exists
|
||||
SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_cron') INTO has_pg_cron;
|
||||
|
||||
IF has_pg_cron THEN
|
||||
old_job_name := format('refresh-store-views-%s', current_schema_name);
|
||||
job_name := format('refresh-store-views_%s', current_schema_name);
|
||||
|
||||
-- Try to unschedule existing job (ignore errors if it doesn't exist)
|
||||
BEGIN
|
||||
PERFORM cron.unschedule(old_job_name);
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
NULL;
|
||||
END;
|
||||
|
||||
-- Schedule the new job with explicit schema parameter
|
||||
PERFORM cron.schedule(
|
||||
job_name,
|
||||
'*/15 * * * *',
|
||||
format('SET search_path TO %I; SELECT refresh_store_materialized_views();', current_schema_name)
|
||||
);
|
||||
RAISE NOTICE 'Scheduled job %; runs every 15 minutes for schema %', job_name, current_schema_name;
|
||||
ELSE
|
||||
RAISE WARNING '⚠️ Automatic refresh NOT configured - pg_cron is not available';
|
||||
RAISE WARNING '⚠️ You must manually refresh views with: SELECT refresh_store_materialized_views();';
|
||||
RAISE WARNING '⚠️ Or install pg_cron for automatic refresh in production';
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
@@ -1,3 +0,0 @@
|
||||
-- Re-create foreign key CreditTransaction <- User with ON DELETE NO ACTION
|
||||
ALTER TABLE "CreditTransaction" DROP CONSTRAINT "CreditTransaction_userId_fkey";
|
||||
ALTER TABLE "CreditTransaction" ADD CONSTRAINT "CreditTransaction_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE NO ACTION ON UPDATE CASCADE;
|
||||
@@ -1,22 +0,0 @@
|
||||
/*
|
||||
Warnings:
|
||||
|
||||
- A unique constraint covering the columns `[shareToken]` on the table `AgentGraphExecution` will be added. If there are existing duplicate values, this will fail.
|
||||
|
||||
*/
|
||||
-- AlterTable
|
||||
ALTER TABLE "AgentGraphExecution" ADD COLUMN "isShared" BOOLEAN NOT NULL DEFAULT false,
|
||||
ADD COLUMN "shareToken" TEXT,
|
||||
ADD COLUMN "sharedAt" TIMESTAMP(3);
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX "AgentGraphExecution_shareToken_key" ON "AgentGraphExecution"("shareToken");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "AgentGraphExecution_shareToken_idx" ON "AgentGraphExecution"("shareToken");
|
||||
|
||||
-- RenameIndex
|
||||
ALTER INDEX "APIKey_key_key" RENAME TO "APIKey_hash_key";
|
||||
|
||||
-- RenameIndex
|
||||
ALTER INDEX "APIKey_prefix_name_idx" RENAME TO "APIKey_head_name_idx";
|
||||
@@ -1,53 +0,0 @@
-- Add instructions field to AgentGraph and StoreListingVersion tables and update StoreSubmission view

BEGIN;

-- AddColumn
ALTER TABLE "AgentGraph" ADD COLUMN "instructions" TEXT;

-- AddColumn
ALTER TABLE "StoreListingVersion" ADD COLUMN "instructions" TEXT;

-- Drop the existing view
DROP VIEW IF EXISTS "StoreSubmission";

-- Recreate the view with the new instructions field
CREATE VIEW "StoreSubmission" AS
SELECT
    sl.id AS listing_id,
    sl."owningUserId" AS user_id,
    slv."agentGraphId" AS agent_id,
    slv.version AS agent_version,
    sl.slug,
    COALESCE(slv.name, '') AS name,
    slv."subHeading" AS sub_heading,
    slv.description,
    slv.instructions,
    slv."imageUrls" AS image_urls,
    slv."submittedAt" AS date_submitted,
    slv."submissionStatus" AS status,
    COALESCE(ar.run_count, 0::bigint) AS runs,
    COALESCE(avg(sr.score::numeric), 0.0)::double precision AS rating,
    slv.id AS store_listing_version_id,
    slv."reviewerId" AS reviewer_id,
    slv."reviewComments" AS review_comments,
    slv."internalComments" AS internal_comments,
    slv."reviewedAt" AS reviewed_at,
    slv."changesSummary" AS changes_summary,
    slv."videoUrl" AS video_url,
    slv.categories
FROM "StoreListing" sl
JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl.id
LEFT JOIN "StoreListingReview" sr ON sr."storeListingVersionId" = slv.id
LEFT JOIN (
    SELECT "AgentGraphExecution"."agentGraphId", count(*) AS run_count
    FROM "AgentGraphExecution"
    GROUP BY "AgentGraphExecution"."agentGraphId"
) ar ON ar."agentGraphId" = slv."agentGraphId"
WHERE sl."isDeleted" = false
GROUP BY sl.id, sl."owningUserId", slv.id, slv."agentGraphId", slv.version, sl.slug, slv.name,
    slv."subHeading", slv.description, slv.instructions, slv."imageUrls", slv."submittedAt",
    slv."submissionStatus", slv."reviewerId", slv."reviewComments", slv."internalComments",
    slv."reviewedAt", slv."changesSummary", slv."videoUrl", slv.categories, ar.run_count;

COMMIT;
18 autogpt_platform/backend/poetry.lock generated
@@ -4145,22 +4145,6 @@ files = [
[package.extras]
twisted = ["twisted"]

[[package]]
name = "prometheus-fastapi-instrumentator"
version = "7.1.0"
description = "Instrument your FastAPI app with Prometheus metrics"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
    {file = "prometheus_fastapi_instrumentator-7.1.0-py3-none-any.whl", hash = "sha256:978130f3c0bb7b8ebcc90d35516a6fe13e02d2eb358c8f83887cdef7020c31e9"},
    {file = "prometheus_fastapi_instrumentator-7.1.0.tar.gz", hash = "sha256:be7cd61eeea4e5912aeccb4261c6631b3f227d8924542d79eaf5af3f439cbe5e"},
]

[package.dependencies]
prometheus-client = ">=0.8.0,<1.0.0"
starlette = ">=0.30.0,<1.0.0"

[[package]]
name = "propcache"
version = "0.3.2"

@@ -7159,4 +7143,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "2c7e9370f500039b99868376021627c5a120e0ee31c5c5e6de39db2c3d82f414"
content-hash = "80d4dc2cbcd1ae33b2fa3920db5dcb1f82ad252d1e4a8bfeba8b2f2eebbdda0d"
@@ -45,7 +45,6 @@ postmarker = "^1.0"
praw = "~7.8.1"
prisma = "^0.15.0"
prometheus-client = "^0.22.1"
prometheus-fastapi-instrumentator = "^7.0.0"
psutil = "^7.0.0"
psycopg2-binary = "^2.9.10"
pydantic = { extras = ["email"], version = "^2.11.7" }
@@ -110,7 +110,6 @@ model AgentGraph {

  name                    String?
  description             String?
  instructions            String?
  recommendedScheduleCron String?

  isActive Boolean @default(true)

@@ -371,16 +370,10 @@ model AgentGraphExecution {

  stats Json?

  // Sharing fields
  isShared   Boolean   @default(false)
  shareToken String?   @unique
  sharedAt   DateTime?

  @@index([agentGraphId, agentGraphVersion])
  @@index([userId])
  @@index([createdAt])
  @@index([agentPresetId])
  @@index([shareToken])
}

// This model describes the execution of an AgentNode.

@@ -535,7 +528,7 @@ model CreditTransaction {
  createdAt DateTime @default(now())

  userId String
  User   User?  @relation(fields: [userId], references: [id], onDelete: NoAction)
  User   User   @relation(fields: [userId], references: [id], onDelete: Cascade)

  amount Int
  type   CreditTransactionType

@@ -764,7 +757,6 @@ model StoreListingVersion {
  videoUrl     String?
  imageUrls    String[]
  description  String
  instructions String?
  categories   String[]

  isFeatured Boolean @default(false)
@@ -11,7 +11,6 @@
|
||||
"creator_avatar": "avatar1.jpg",
|
||||
"sub_heading": "Test agent subheading",
|
||||
"description": "Test agent description",
|
||||
"instructions": null,
|
||||
"categories": [
|
||||
"category1",
|
||||
"category2"
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
{
|
||||
"created_at": "2025-09-04T13:37:00",
|
||||
"credentials_input_schema": {
|
||||
"properties": {},
|
||||
"title": "TestGraphCredentialsInputSchema",
|
||||
@@ -15,7 +14,6 @@
|
||||
"required": [],
|
||||
"type": "object"
|
||||
},
|
||||
"instructions": null,
|
||||
"is_active": true,
|
||||
"links": [],
|
||||
"name": "Test Graph",
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
"required": [],
|
||||
"type": "object"
|
||||
},
|
||||
"instructions": null,
|
||||
"is_active": true,
|
||||
"name": "Test Graph",
|
||||
"output_schema": {
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
"updated_at": "2023-01-01T00:00:00",
|
||||
"name": "Test Agent 1",
|
||||
"description": "Test Description 1",
|
||||
"instructions": null,
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
@@ -29,7 +28,6 @@
|
||||
"new_output": false,
|
||||
"can_access_graph": true,
|
||||
"is_latest_version": true,
|
||||
"is_favorite": false,
|
||||
"recommended_schedule_cron": null
|
||||
},
|
||||
{
|
||||
@@ -43,7 +41,6 @@
|
||||
"updated_at": "2023-01-01T00:00:00",
|
||||
"name": "Test Agent 2",
|
||||
"description": "Test Description 2",
|
||||
"instructions": null,
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
@@ -61,7 +58,6 @@
|
||||
"new_output": false,
|
||||
"can_access_graph": false,
|
||||
"is_latest_version": true,
|
||||
"is_favorite": false,
|
||||
"recommended_schedule_cron": null
|
||||
}
|
||||
],
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
"sub_heading": "Test agent subheading",
|
||||
"slug": "test-agent",
|
||||
"description": "Test agent description",
|
||||
"instructions": null,
|
||||
"image_urls": [
|
||||
"test.jpg"
|
||||
],
|
||||
|
||||
@@ -146,23 +146,16 @@ class TestAutoRegistry:
|
||||
"""Test API key environment variable registration."""
|
||||
import os
|
||||
|
||||
from backend.sdk.builder import ProviderBuilder
|
||||
|
||||
# Set up a test environment variable
|
||||
os.environ["TEST_API_KEY"] = "test-api-key-value"
|
||||
|
||||
try:
|
||||
# Use ProviderBuilder which calls register_api_key and creates the credential
|
||||
provider = (
|
||||
ProviderBuilder("test_provider")
|
||||
.with_api_key("TEST_API_KEY", "Test API Key")
|
||||
.build()
|
||||
)
|
||||
AutoRegistry.register_api_key("test_provider", "TEST_API_KEY")
|
||||
|
||||
# Verify the mapping is stored
|
||||
assert AutoRegistry._api_key_mappings["test_provider"] == "TEST_API_KEY"
|
||||
|
||||
# Verify a credential was created through the provider
|
||||
# Verify a credential was created
|
||||
all_creds = AutoRegistry.get_all_credentials()
|
||||
test_cred = next(
|
||||
(c for c in all_creds if c.id == "test_provider-default"), None
|
||||
|
||||
@@ -1,262 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# AutoGPT Platform Container Build Test Script
|
||||
# This script tests container builds locally before CI/CD
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Configuration
|
||||
REGISTRY="ghcr.io"
|
||||
IMAGE_PREFIX="significant-gravitas/autogpt-platform"
|
||||
VERSION="test"
|
||||
BUILD_ARGS=""
|
||||
|
||||
# Functions
|
||||
info() {
|
||||
echo -e "${BLUE}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
success() {
|
||||
echo -e "${GREEN}[SUCCESS]${NC} $1"
|
||||
}
|
||||
|
||||
warning() {
|
||||
echo -e "${YELLOW}[WARNING]${NC} $1"
|
||||
}
|
||||
|
||||
error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1"
|
||||
}
|
||||
|
||||
usage() {
|
||||
cat << EOF
|
||||
AutoGPT Platform Container Build Test Script
|
||||
|
||||
Usage: $0 [OPTIONS] [COMPONENT]
|
||||
|
||||
COMPONENTS:
|
||||
backend Build backend container only
|
||||
frontend Build frontend container only
|
||||
all Build both containers (default)
|
||||
|
||||
OPTIONS:
|
||||
-r, --registry REGISTRY Container registry (default: ghcr.io)
|
||||
-t, --tag TAG Image tag (default: test)
|
||||
--no-cache Build without cache
|
||||
--push Push images after build
|
||||
-h, --help Show this help message
|
||||
|
||||
EXAMPLES:
|
||||
$0 # Build both containers
|
||||
$0 backend # Build backend only
|
||||
$0 --no-cache all # Build without cache
|
||||
$0 --push frontend # Build and push frontend
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
check_docker() {
|
||||
if ! command -v docker &> /dev/null; then
|
||||
error "Docker is not installed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! docker info &> /dev/null; then
|
||||
error "Docker daemon is not running"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
success "Docker is available"
|
||||
}
|
||||
|
||||
build_backend() {
|
||||
info "Building backend container..."
|
||||
|
||||
local image_name="$REGISTRY/$IMAGE_PREFIX-backend:$VERSION"
|
||||
local dockerfile="autogpt_platform/backend/Dockerfile"
|
||||
|
||||
info "Building: $image_name"
|
||||
info "Dockerfile: $dockerfile"
|
||||
info "Context: ."
|
||||
info "Target: server"
|
||||
|
||||
if docker build \
|
||||
-t "$image_name" \
|
||||
-f "$dockerfile" \
|
||||
--target server \
|
||||
$BUILD_ARGS \
|
||||
.; then
|
||||
success "Backend container built successfully: $image_name"
|
||||
|
||||
# Test the container
|
||||
info "Testing backend container..."
|
||||
if docker run --rm -d --name autogpt-backend-test "$image_name" > /dev/null; then
|
||||
sleep 5
|
||||
if docker ps | grep -q autogpt-backend-test; then
|
||||
success "Backend container is running"
|
||||
docker stop autogpt-backend-test > /dev/null
|
||||
else
|
||||
warning "Backend container started but may have issues"
|
||||
fi
|
||||
else
|
||||
warning "Failed to start backend container for testing"
|
||||
fi
|
||||
|
||||
return 0
|
||||
else
|
||||
error "Backend container build failed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
build_frontend() {
|
||||
info "Building frontend container..."
|
||||
|
||||
local image_name="$REGISTRY/$IMAGE_PREFIX-frontend:$VERSION"
|
||||
local dockerfile="autogpt_platform/frontend/Dockerfile"
|
||||
|
||||
info "Building: $image_name"
|
||||
info "Dockerfile: $dockerfile"
|
||||
info "Context: ."
|
||||
info "Target: prod"
|
||||
|
||||
if docker build \
|
||||
-t "$image_name" \
|
||||
-f "$dockerfile" \
|
||||
--target prod \
|
||||
$BUILD_ARGS \
|
||||
.; then
|
||||
success "Frontend container built successfully: $image_name"
|
||||
|
||||
# Test the container
|
||||
info "Testing frontend container..."
|
||||
if docker run --rm -d --name autogpt-frontend-test -p 3001:3000 "$image_name" > /dev/null; then
|
||||
sleep 10
|
||||
if docker ps | grep -q autogpt-frontend-test; then
|
||||
if curl -s -o /dev/null -w "%{http_code}" http://localhost:3001 | grep -q "200\|302\|404"; then
|
||||
success "Frontend container is responding"
|
||||
else
|
||||
warning "Frontend container started but not responding to HTTP requests"
|
||||
fi
|
||||
docker stop autogpt-frontend-test > /dev/null
|
||||
else
|
||||
warning "Frontend container started but may have issues"
|
||||
fi
|
||||
else
|
||||
warning "Failed to start frontend container for testing"
|
||||
fi
|
||||
|
||||
return 0
|
||||
else
|
||||
error "Frontend container build failed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
push_images() {
|
||||
if [[ "$PUSH_IMAGES" == "true" ]]; then
|
||||
info "Pushing images to registry..."
|
||||
|
||||
local backend_image="$REGISTRY/$IMAGE_PREFIX-backend:$VERSION"
|
||||
local frontend_image="$REGISTRY/$IMAGE_PREFIX-frontend:$VERSION"
|
||||
|
||||
for image in "$backend_image" "$frontend_image"; do
|
||||
if docker images | grep -q "$image"; then
|
||||
info "Pushing $image..."
|
||||
if docker push "$image"; then
|
||||
success "Pushed $image"
|
||||
else
|
||||
error "Failed to push $image"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
show_images() {
|
||||
info "Built images:"
|
||||
docker images | grep "$IMAGE_PREFIX" | grep "$VERSION"
|
||||
}
|
||||
|
||||
cleanup_test_containers() {
|
||||
# Clean up any test containers that might be left running
|
||||
docker ps -a | grep "autogpt-.*-test" | awk '{print $1}' | xargs -r docker rm -f > /dev/null 2>&1 || true
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
COMPONENT="all"
|
||||
PUSH_IMAGES="false"
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-r|--registry)
|
||||
REGISTRY="$2"
|
||||
shift 2
|
||||
;;
|
||||
-t|--tag)
|
||||
VERSION="$2"
|
||||
shift 2
|
||||
;;
|
||||
--no-cache)
|
||||
BUILD_ARGS="$BUILD_ARGS --no-cache"
|
||||
shift
|
||||
;;
|
||||
--push)
|
||||
PUSH_IMAGES="true"
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
backend|frontend|all)
|
||||
COMPONENT="$1"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
error "Unknown option: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Main execution
|
||||
info "AutoGPT Platform Container Build Test"
|
||||
info "Component: $COMPONENT"
|
||||
info "Registry: $REGISTRY"
|
||||
info "Tag: $VERSION"
|
||||
|
||||
check_docker
|
||||
cleanup_test_containers
|
||||
|
||||
# Build containers based on component selection
|
||||
case "$COMPONENT" in
|
||||
backend)
|
||||
build_backend
|
||||
;;
|
||||
frontend)
|
||||
build_frontend
|
||||
;;
|
||||
all)
|
||||
if build_backend && build_frontend; then
|
||||
success "All containers built successfully"
|
||||
else
|
||||
error "Some container builds failed"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
push_images
|
||||
show_images
|
||||
cleanup_test_containers
|
||||
|
||||
success "Build test completed successfully"
|
||||
@@ -1,480 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# AutoGPT Platform Deployment Script
|
||||
# This script deploys AutoGPT Platform using published container images
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Configuration
|
||||
COMPOSE_FILE="docker-compose.published.yml"
|
||||
ENV_FILE=".env"
|
||||
BACKUP_DIR="backups"
|
||||
LOG_FILE="deploy.log"
|
||||
|
||||
# Default values
|
||||
REGISTRY="ghcr.io"
|
||||
IMAGE_PREFIX="significant-gravitas/autogpt-platform"
|
||||
VERSION="latest"
|
||||
PROFILE="local"
|
||||
ACTION=""
|
||||
|
||||
# Functions
|
||||
log() {
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
info() {
|
||||
echo -e "${BLUE}[INFO]${NC} $1" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
success() {
|
||||
echo -e "${GREEN}[SUCCESS]${NC} $1" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
warning() {
|
||||
echo -e "${YELLOW}[WARNING]${NC} $1" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
usage() {
|
||||
cat << EOF
|
||||
AutoGPT Platform Deployment Script
|
||||
|
||||
Usage: $0 [OPTIONS] ACTION
|
||||
|
||||
ACTIONS:
|
||||
deploy Deploy the platform
|
||||
start Start existing deployment
|
||||
stop Stop the deployment
|
||||
restart Restart the deployment
|
||||
update Update to latest images
|
||||
backup Create backup of data
|
||||
restore Restore from backup
|
||||
logs Show logs
|
||||
status Show deployment status
|
||||
cleanup Remove all containers and volumes
|
||||
|
||||
OPTIONS:
|
||||
-r, --registry REGISTRY Container registry (default: ghcr.io)
|
||||
-v, --version VERSION Image version/tag (default: latest)
|
||||
-p, --profile PROFILE Docker compose profile (default: local)
|
||||
-f, --file FILE Compose file (default: docker-compose.published.yml)
|
||||
-e, --env FILE Environment file (default: .env)
|
||||
-h, --help Show this help message
|
||||
|
||||
EXAMPLES:
|
||||
$0 deploy # Deploy with defaults
|
||||
$0 -v v1.0.0 deploy # Deploy specific version
|
||||
$0 -r docker.io update # Update from Docker Hub
|
||||
$0 -p production deploy # Deploy for production
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
check_dependencies() {
|
||||
info "Checking dependencies..."
|
||||
|
||||
if ! command -v docker &> /dev/null; then
|
||||
error "Docker is not installed. Please install Docker first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
|
||||
error "Docker Compose is not installed. Please install Docker Compose first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if Docker daemon is running
|
||||
if ! docker info &> /dev/null; then
|
||||
error "Docker daemon is not running. Please start Docker first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
success "All dependencies are available"
|
||||
}
|
||||
|
||||
setup_environment() {
|
||||
info "Setting up environment..."
|
||||
|
||||
# Create necessary directories
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
mkdir -p "data/postgres"
|
||||
mkdir -p "data/redis"
|
||||
mkdir -p "data/rabbitmq"
|
||||
mkdir -p "data/backend"
|
||||
|
||||
# Create environment file if it doesn't exist
|
||||
if [[ ! -f "$ENV_FILE" ]]; then
|
||||
info "Creating default environment file..."
|
||||
cat > "$ENV_FILE" << EOF
|
||||
# AutoGPT Platform Configuration
|
||||
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
|
||||
REDIS_PASSWORD=your-redis-password
|
||||
RABBITMQ_PASSWORD=your-rabbitmq-password
|
||||
JWT_SECRET=your-long-random-jwt-secret-with-at-least-32-characters
|
||||
|
||||
# Registry Configuration
|
||||
REGISTRY=${REGISTRY}
|
||||
IMAGE_PREFIX=${IMAGE_PREFIX}
|
||||
VERSION=${VERSION}
|
||||
|
||||
# Network Configuration
|
||||
BACKEND_PORT=8006
|
||||
FRONTEND_PORT=3000
|
||||
POSTGRES_PORT=5432
|
||||
REDIS_PORT=6379
|
||||
RABBITMQ_PORT=5672
|
||||
RABBITMQ_MANAGEMENT_PORT=15672
|
||||
|
||||
# Development
|
||||
PROFILE=${PROFILE}
|
||||
EOF
|
||||
warning "Created default $ENV_FILE - please review and update passwords!"
|
||||
fi
|
||||
|
||||
success "Environment setup complete"
|
||||
}
|
||||
|
||||
check_ports() {
|
||||
info "Checking if required ports are available..."
|
||||
|
||||
local ports=(3000 8000 8001 8002 8003 8005 8006 8007 5432 6379 5672 15672)
|
||||
local used_ports=()
|
||||
|
||||
for port in "${ports[@]}"; do
|
||||
if ss -tuln | grep -q ":$port "; then
|
||||
used_ports+=("$port")
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ ${#used_ports[@]} -gt 0 ]]; then
|
||||
warning "The following ports are already in use: ${used_ports[*]}"
|
||||
warning "This may cause conflicts. Please stop services using these ports or modify the configuration."
|
||||
else
|
||||
success "All required ports are available"
|
||||
fi
|
||||
}
|
||||
|
||||
pull_images() {
|
||||
info "Pulling container images..."
|
||||
|
||||
local images=(
|
||||
"$REGISTRY/$IMAGE_PREFIX-backend:$VERSION"
|
||||
"$REGISTRY/$IMAGE_PREFIX-frontend:$VERSION"
|
||||
)
|
||||
|
||||
for image in "${images[@]}"; do
|
||||
info "Pulling $image..."
|
||||
if docker pull "$image"; then
|
||||
success "Pulled $image"
|
||||
else
|
||||
error "Failed to pull $image"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
deploy() {
|
||||
info "Deploying AutoGPT Platform..."
|
||||
|
||||
check_dependencies
|
||||
setup_environment
|
||||
check_ports
|
||||
pull_images
|
||||
|
||||
# Update compose file with current settings
|
||||
export REGISTRY="$REGISTRY"
|
||||
export IMAGE_PREFIX="$IMAGE_PREFIX"
|
||||
export VERSION="$VERSION"
|
||||
|
||||
info "Starting services..."
|
||||
if docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" up -d; then
|
||||
success "AutoGPT Platform deployed successfully!"
|
||||
|
||||
info "Waiting for services to be ready..."
|
||||
sleep 10
|
||||
|
||||
show_status
|
||||
|
||||
info "Access the platform at:"
|
||||
info " Frontend: http://localhost:3000"
|
||||
info " Backend API: http://localhost:8006"
|
||||
info " Database Admin: http://localhost:8910 (if using local profile)"
|
||||
info " RabbitMQ Management: http://localhost:15672"
|
||||
else
|
||||
error "Deployment failed. Check logs with: $0 logs"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
start_services() {
|
||||
info "Starting AutoGPT Platform services..."
|
||||
if docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" start; then
|
||||
success "Services started successfully"
|
||||
show_status
|
||||
else
|
||||
error "Failed to start services"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
stop_services() {
|
||||
info "Stopping AutoGPT Platform services..."
|
||||
if docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" stop; then
|
||||
success "Services stopped successfully"
|
||||
else
|
||||
error "Failed to stop services"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
restart_services() {
|
||||
info "Restarting AutoGPT Platform services..."
|
||||
stop_services
|
||||
start_services
|
||||
}
|
||||
|
||||
update_services() {
|
||||
info "Updating AutoGPT Platform to version $VERSION..."
|
||||
|
||||
# Pull new images
|
||||
pull_images
|
||||
|
||||
# Recreate containers with new images
|
||||
info "Recreating containers with new images..."
|
||||
if docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" up -d --force-recreate; then
|
||||
success "Update completed successfully"
|
||||
show_status
|
||||
else
|
||||
error "Update failed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
backup_data() {
|
||||
local backup_name="autogpt-backup-$(date +%Y%m%d-%H%M%S)"
|
||||
local backup_path="$BACKUP_DIR/$backup_name"
|
||||
|
||||
info "Creating backup: $backup_name..."
|
||||
mkdir -p "$backup_path"
|
||||
|
||||
# Stop services for consistent backup
|
||||
info "Stopping services for backup..."
|
||||
docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" stop
|
||||
|
||||
# Backup database
|
||||
info "Backing up database..."
|
||||
docker compose -f "$COMPOSE_FILE" run --rm db pg_dump -U postgres postgres > "$backup_path/database.sql"
|
||||
|
||||
# Backup volumes
|
||||
info "Backing up data volumes..."
|
||||
cp -r data "$backup_path/"
|
||||
|
||||
# Backup configuration
|
||||
cp "$ENV_FILE" "$backup_path/"
|
||||
cp "$COMPOSE_FILE" "$backup_path/"
|
||||
|
||||
# Restart services
|
||||
info "Restarting services..."
|
||||
docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" start
|
||||
|
||||
success "Backup created: $backup_path"
|
||||
}
|
||||
|
||||
restore_data() {
|
||||
if [[ $# -lt 1 ]]; then
|
||||
error "Please specify backup directory to restore from"
|
||||
error "Usage: $0 restore <backup-directory>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
local backup_path="$1"
|
||||
|
||||
if [[ ! -d "$backup_path" ]]; then
|
||||
error "Backup directory not found: $backup_path"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
warning "This will overwrite current data. Are you sure? (y/N)"
|
||||
read -r response
|
||||
if [[ ! "$response" =~ ^[Yy]$ ]]; then
|
||||
info "Restore cancelled"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
info "Restoring from backup: $backup_path..."
|
||||
|
||||
# Stop services
|
||||
stop_services
|
||||
|
||||
# Restore data
|
||||
info "Restoring data volumes..."
|
||||
rm -rf data
|
||||
cp -r "$backup_path/data" .
|
||||
|
||||
# Restore configuration
|
||||
if [[ -f "$backup_path/$ENV_FILE" ]]; then
|
||||
cp "$backup_path/$ENV_FILE" .
|
||||
info "Restored environment configuration"
|
||||
fi
|
||||
|
||||
# Start services
|
||||
start_services
|
||||
|
||||
# Restore database
|
||||
if [[ -f "$backup_path/database.sql" ]]; then
|
||||
info "Restoring database..."
|
||||
docker compose -f "$COMPOSE_FILE" exec -T db psql -U postgres postgres < "$backup_path/database.sql"
|
||||
fi
|
||||
|
||||
success "Restore completed successfully"
|
||||
}
|
||||
|
||||
show_logs() {
|
||||
info "Showing logs (press Ctrl+C to exit)..."
|
||||
docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" logs -f
|
||||
}
|
||||
|
||||
show_status() {
|
||||
info "AutoGPT Platform Status:"
|
||||
docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" ps
|
||||
|
||||
echo
|
||||
info "Service Health:"
|
||||
|
||||
# Check service health
|
||||
local services=("frontend:3000" "rest_server:8006" "db:5432" "redis:6379")
|
||||
|
||||
for service in "${services[@]}"; do
|
||||
local name="${service%:*}"
|
||||
local port="${service#*:}"
|
||||
|
||||
if docker compose -f "$COMPOSE_FILE" ps "$name" | grep -q "Up"; then
|
||||
if nc -z localhost "$port" 2>/dev/null; then
|
||||
echo -e " ${GREEN}✓${NC} $name (port $port)"
|
||||
else
|
||||
echo -e " ${YELLOW}⚠${NC} $name (container up, port not accessible)"
|
||||
fi
|
||||
else
|
||||
echo -e " ${RED}✗${NC} $name (container down)"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
warning "This will remove all containers and volumes. Are you sure? (y/N)"
|
||||
read -r response
|
||||
if [[ ! "$response" =~ ^[Yy]$ ]]; then
|
||||
info "Cleanup cancelled"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
info "Cleaning up AutoGPT Platform..."
|
||||
|
||||
# Stop and remove containers
|
||||
docker compose -f "$COMPOSE_FILE" --profile "$PROFILE" down -v --remove-orphans
|
||||
|
||||
# Remove images
|
||||
docker images | grep "$IMAGE_PREFIX" | awk '{print $3}' | xargs -r docker rmi
|
||||
|
||||
# Remove data directories
|
||||
rm -rf data
|
||||
|
||||
success "Cleanup completed"
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-r|--registry)
|
||||
REGISTRY="$2"
|
||||
shift 2
|
||||
;;
|
||||
-v|--version)
|
||||
VERSION="$2"
|
||||
shift 2
|
||||
;;
|
||||
-p|--profile)
|
||||
PROFILE="$2"
|
||||
shift 2
|
||||
;;
|
||||
-f|--file)
|
||||
COMPOSE_FILE="$2"
|
||||
shift 2
|
||||
;;
|
||||
-e|--env)
|
||||
ENV_FILE="$2"
|
||||
shift 2
|
||||
;;
|
||||
-h|--help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
deploy|start|stop|restart|update|backup|restore|logs|status|cleanup)
|
||||
ACTION="$1"
|
||||
shift
|
||||
break
|
||||
;;
|
||||
*)
|
||||
error "Unknown option: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Check if action is provided
|
||||
if [[ -z "$ACTION" ]]; then
|
||||
error "No action specified"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Execute action
|
||||
case "$ACTION" in
|
||||
deploy)
|
||||
deploy
|
||||
;;
|
||||
start)
|
||||
start_services
|
||||
;;
|
||||
stop)
|
||||
stop_services
|
||||
;;
|
||||
restart)
|
||||
restart_services
|
||||
;;
|
||||
update)
|
||||
update_services
|
||||
;;
|
||||
backup)
|
||||
backup_data
|
||||
;;
|
||||
restore)
|
||||
restore_data "$@"
|
||||
;;
|
||||
logs)
|
||||
show_logs
|
||||
;;
|
||||
status)
|
||||
show_status
|
||||
;;
|
||||
cleanup)
|
||||
cleanup
|
||||
;;
|
||||
*)
|
||||
error "Unknown action: $ACTION"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
@@ -1,514 +0,0 @@
|
||||
# AutoGPT Platform - Published Container Deployment
|
||||
# This compose file uses pre-built containers from GitHub Container Registry
|
||||
# Use this for production deployments or when you don't want to build from source
|
||||
|
||||
networks:
|
||||
app-network:
|
||||
name: app-network
|
||||
shared-network:
|
||||
name: shared-network
|
||||
|
||||
volumes:
|
||||
supabase-config:
|
||||
clamav-data:
|
||||
postgres-data:
|
||||
redis-data:
|
||||
rabbitmq-data:
|
||||
|
||||
x-agpt-services:
|
||||
&agpt-services
|
||||
networks:
|
||||
- app-network
|
||||
- shared-network
|
||||
|
||||
x-supabase-services:
|
||||
&supabase-services
|
||||
networks:
|
||||
- app-network
|
||||
- shared-network
|
||||
|
||||
services:
|
||||
# Database migration service
|
||||
migrate:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
command: ["sh", "-c", "poetry run prisma migrate deploy"]
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
env_file:
|
||||
- backend/.env.default
|
||||
- path: backend/.env
|
||||
required: false
|
||||
environment:
|
||||
PYRO_HOST: "0.0.0.0"
|
||||
DB_HOST: db
|
||||
REDIS_HOST: redis
|
||||
RABBITMQ_HOST: rabbitmq
|
||||
SUPABASE_URL: http://kong:8000
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
|
||||
# Redis cache service
|
||||
redis:
|
||||
<<: *agpt-services
|
||||
image: redis:latest
|
||||
command: redis-server --requirepass password
|
||||
ports:
|
||||
- "6379:6379"
|
||||
volumes:
|
||||
- redis-data:/data
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
# RabbitMQ message broker
|
||||
rabbitmq:
|
||||
<<: *agpt-services
|
||||
image: rabbitmq:management
|
||||
container_name: rabbitmq
|
||||
volumes:
|
||||
- rabbitmq-data:/var/lib/rabbitmq
|
||||
ports:
|
||||
- "5672:5672"
|
||||
- "15672:15672"
|
||||
environment:
|
||||
RABBITMQ_DEFAULT_USER: autogpt
|
||||
RABBITMQ_DEFAULT_PASS: autogpt_password
|
||||
healthcheck:
|
||||
test: rabbitmq-diagnostics -q ping
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 10s
|
||||
|
||||
# Backend API server
|
||||
rest_server:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
command: ["python", "-m", "backend.rest"]
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_healthy
|
||||
db:
|
||||
condition: service_healthy
|
||||
migrate:
|
||||
condition: service_completed_successfully
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
env_file:
|
||||
- backend/.env.default
|
||||
- path: backend/.env
|
||||
required: false
|
||||
environment:
|
||||
PYRO_HOST: "0.0.0.0"
|
||||
AGENTSERVER_HOST: rest_server
|
||||
SCHEDULER_HOST: scheduler_server
|
||||
DATABASEMANAGER_HOST: database_manager
|
||||
EXECUTIONMANAGER_HOST: executor
|
||||
NOTIFICATIONMANAGER_HOST: notification_server
|
||||
CLAMAV_SERVICE_HOST: clamav
|
||||
DB_HOST: db
|
||||
REDIS_HOST: redis
|
||||
RABBITMQ_HOST: rabbitmq
|
||||
SUPABASE_URL: http://kong:8000
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
ports:
|
||||
- "8006:8006"
|
||||
|
||||
# Backend executor service
|
||||
executor:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
command: ["python", "-m", "backend.exec"]
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_healthy
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
db:
|
||||
condition: service_healthy
|
||||
migrate:
|
||||
condition: service_completed_successfully
|
||||
database_manager:
|
||||
condition: service_started
|
||||
env_file:
|
||||
- backend/.env.default
|
||||
- path: backend/.env
|
||||
required: false
|
||||
environment:
|
||||
PYRO_HOST: "0.0.0.0"
|
||||
AGENTSERVER_HOST: rest_server
|
||||
SCHEDULER_HOST: scheduler_server
|
||||
DATABASEMANAGER_HOST: database_manager
|
||||
EXECUTIONMANAGER_HOST: executor
|
||||
NOTIFICATIONMANAGER_HOST: notification_server
|
||||
CLAMAV_SERVICE_HOST: clamav
|
||||
DB_HOST: db
|
||||
REDIS_HOST: redis
|
||||
RABBITMQ_HOST: rabbitmq
|
||||
SUPABASE_URL: http://kong:8000
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
ports:
|
||||
- "8002:8002"
|
||||
|
||||
# Backend WebSocket server
|
||||
websocket_server:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
command: ["python", "-m", "backend.ws"]
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
migrate:
|
||||
condition: service_completed_successfully
|
||||
database_manager:
|
||||
condition: service_started
|
||||
env_file:
|
||||
- backend/.env.default
|
||||
- path: backend/.env
|
||||
required: false
|
||||
environment:
|
||||
PYRO_HOST: "0.0.0.0"
|
||||
AGENTSERVER_HOST: rest_server
|
||||
SCHEDULER_HOST: scheduler_server
|
||||
DATABASEMANAGER_HOST: database_manager
|
||||
EXECUTIONMANAGER_HOST: executor
|
||||
NOTIFICATIONMANAGER_HOST: notification_server
|
||||
CLAMAV_SERVICE_HOST: clamav
|
||||
DB_HOST: db
|
||||
REDIS_HOST: redis
|
||||
RABBITMQ_HOST: rabbitmq
|
||||
SUPABASE_URL: http://kong:8000
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
ports:
|
||||
- "8001:8001"
|
||||
|
||||
# Backend database manager
|
||||
database_manager:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
command: ["python", "-m", "backend.db"]
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
migrate:
|
||||
condition: service_completed_successfully
|
||||
env_file:
|
||||
- backend/.env.default
|
||||
- path: backend/.env
|
||||
required: false
|
||||
environment:
|
||||
PYRO_HOST: "0.0.0.0"
|
||||
AGENTSERVER_HOST: rest_server
|
||||
SCHEDULER_HOST: scheduler_server
|
||||
DATABASEMANAGER_HOST: database_manager
|
||||
EXECUTIONMANAGER_HOST: executor
|
||||
NOTIFICATIONMANAGER_HOST: notification_server
|
||||
CLAMAV_SERVICE_HOST: clamav
|
||||
DB_HOST: db
|
||||
REDIS_HOST: redis
|
||||
RABBITMQ_HOST: rabbitmq
|
||||
SUPABASE_URL: http://kong:8000
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
ports:
|
||||
- "8005:8005"
|
||||
|
||||
# Backend scheduler service
|
||||
scheduler_server:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
command: ["python", "-m", "backend.scheduler"]
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
migrate:
|
||||
condition: service_completed_successfully
|
||||
database_manager:
|
||||
condition: service_started
|
||||
env_file:
|
||||
- backend/.env.default
|
||||
- path: backend/.env
|
||||
required: false
|
||||
environment:
|
||||
PYRO_HOST: "0.0.0.0"
|
||||
AGENTSERVER_HOST: rest_server
|
||||
SCHEDULER_HOST: scheduler_server
|
||||
DATABASEMANAGER_HOST: database_manager
|
||||
EXECUTIONMANAGER_HOST: executor
|
||||
NOTIFICATIONMANAGER_HOST: notification_server
|
||||
CLAMAV_SERVICE_HOST: clamav
|
||||
DB_HOST: db
|
||||
REDIS_HOST: redis
|
||||
RABBITMQ_HOST: rabbitmq
|
||||
SUPABASE_URL: http://kong:8000
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
ports:
|
||||
- "8003:8003"
|
||||
|
||||
# Backend notification service
|
||||
notification_server:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-backend:latest
|
||||
command: ["python", "-m", "backend.notification"]
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
migrate:
|
||||
condition: service_completed_successfully
|
||||
database_manager:
|
||||
condition: service_started
|
||||
env_file:
|
||||
- backend/.env.default
|
||||
- path: backend/.env
|
||||
required: false
|
||||
environment:
|
||||
PYRO_HOST: "0.0.0.0"
|
||||
AGENTSERVER_HOST: rest_server
|
||||
SCHEDULER_HOST: scheduler_server
|
||||
DATABASEMANAGER_HOST: database_manager
|
||||
EXECUTIONMANAGER_HOST: executor
|
||||
NOTIFICATIONMANAGER_HOST: notification_server
|
||||
CLAMAV_SERVICE_HOST: clamav
|
||||
DB_HOST: db
|
||||
REDIS_HOST: redis
|
||||
RABBITMQ_HOST: rabbitmq
|
||||
SUPABASE_URL: http://kong:8000
|
||||
DATABASE_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
DIRECT_URL: postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform
|
||||
ports:
|
||||
- "8007:8007"
|
||||
|
||||
# ClamAV antivirus service
|
||||
clamav:
|
||||
<<: *agpt-services
|
||||
image: clamav/clamav-debian:latest
|
||||
ports:
|
||||
- "3310:3310"
|
||||
volumes:
|
||||
- clamav-data:/var/lib/clamav
|
||||
environment:
|
||||
- CLAMAV_NO_FRESHCLAMD=false
|
||||
- CLAMD_CONF_StreamMaxLength=50M
|
||||
- CLAMD_CONF_MaxFileSize=100M
|
||||
- CLAMD_CONF_MaxScanSize=100M
|
||||
- CLAMD_CONF_MaxThreads=12
|
||||
- CLAMD_CONF_ReadTimeout=300
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "clamdscan --version || exit 1"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
# Frontend application
|
||||
frontend:
|
||||
<<: *agpt-services
|
||||
image: ghcr.io/significant-gravitas/autogpt-platform-frontend:latest
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
migrate:
|
||||
condition: service_completed_successfully
|
||||
ports:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- path: ./frontend/.env.default
|
||||
- path: ./frontend/.env
|
||||
required: false
|
||||
environment:
|
||||
# Server-side environment variables (Docker service names)
|
||||
AUTH_CALLBACK_URL: http://rest_server:8006/auth/callback
|
||||
SUPABASE_URL: http://kong:8000
|
||||
AGPT_SERVER_URL: http://rest_server:8006/api
|
||||
AGPT_WS_SERVER_URL: ws://websocket_server:8001/ws
|
||||
|
||||
# Supabase services (minimal: auth + db + kong)
|
||||
kong:
|
||||
<<: *supabase-services
|
||||
image: supabase/kong:v0.1.0
|
||||
environment:
|
||||
KONG_DATABASE: "off"
|
||||
KONG_DECLARATIVE_CONFIG: /etc/kong/kong.yml
|
||||
KONG_DNS_ORDER: LAST,A,CNAME
|
||||
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
|
||||
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
|
||||
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
|
||||
ports:
|
||||
- "8000:8000/tcp"
|
||||
volumes:
|
||||
- ./db/docker/volumes/api/kong.yml:/etc/kong/kong.yml:ro
|
||||
|
||||
auth:
|
||||
<<: *supabase-services
|
||||
image: supabase/gotrue:v2.151.0
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9999/health"]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
GOTRUE_API_HOST: 0.0.0.0
|
||||
GOTRUE_API_PORT: 9999
|
||||
GOTRUE_DB_DRIVER: postgres
|
||||
GOTRUE_DB_DATABASE_URL: postgresql://supabase_auth_admin:root@db:5432/postgres?search_path=auth
|
||||
GOTRUE_SITE_URL: http://localhost:3000
|
||||
GOTRUE_URI_ALLOW_LIST: "*"
|
||||
GOTRUE_DISABLE_SIGNUP: false
|
||||
GOTRUE_JWT_ADMIN_ROLES: service_role
|
||||
GOTRUE_JWT_AUD: authenticated
|
||||
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
|
||||
GOTRUE_JWT_EXP: 3600
|
||||
GOTRUE_JWT_SECRET: super-secret-jwt-token-with-at-least-32-characters-long
|
||||
GOTRUE_EXTERNAL_EMAIL_ENABLED: true
|
||||
GOTRUE_MAILER_AUTOCONFIRM: true
|
||||
GOTRUE_SMTP_ADMIN_EMAIL: admin@email.com
|
||||
GOTRUE_SMTP_HOST: supabase-mail
|
||||
GOTRUE_SMTP_PORT: 2500
|
||||
GOTRUE_SMTP_USER: fake_mail_user
|
||||
GOTRUE_SMTP_PASS: fake_mail_password
|
||||
GOTRUE_SMTP_SENDER_NAME: fake_sender
|
||||
GOTRUE_MAILER_URLPATHS_INVITE: http://localhost:3000/auth/callback
|
||||
GOTRUE_MAILER_URLPATHS_CONFIRMATION: http://localhost:3000/auth/callback
|
||||
GOTRUE_MAILER_URLPATHS_RECOVERY: http://localhost:3000/auth/callback
|
||||
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: http://localhost:3000/auth/callback
|
||||
|
||||
db:
|
||||
<<: *supabase-services
|
||||
image: supabase/postgres:15.1.0.147
|
||||
healthcheck:
|
||||
test: pg_isready -U postgres -h localhost
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
command:
|
||||
- postgres
|
||||
- -c
|
||||
- config_file=/etc/postgresql/postgresql.conf
|
||||
- -c
|
||||
- log_min_messages=fatal
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "5432:5432"
|
||||
environment:
|
||||
POSTGRES_HOST: /var/run/postgresql
|
||||
PGPORT: 5432
|
||||
POSTGRES_PORT: 5432
|
||||
PGPASSWORD: your-super-secret-and-long-postgres-password
|
||||
POSTGRES_PASSWORD: your-super-secret-and-long-postgres-password
|
||||
PGDATABASE: postgres
|
||||
POSTGRES_DB: postgres
|
||||
PGUSER: postgres
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_INITDB_ARGS: --lc-collate=C --lc-ctype=C
|
||||
volumes:
|
||||
- postgres-data:/var/lib/postgresql/data
|
||||
- ./db/docker/volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
|
||||
- ./db/docker/volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
|
||||
- ./db/docker/volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
|
||||
|
||||
# Development-only services (studio for database management)
|
||||
meta:
|
||||
<<: *supabase-services
|
||||
profiles:
|
||||
- local
|
||||
image: supabase/studio:20240101-5cc8dea
|
||||
healthcheck:
|
||||
test: ["CMD", "node", "-e", "require('http').get('http://localhost:8080/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
STUDIO_PG_META_URL: http://localhost:8080
|
||||
POSTGRES_PASSWORD: your-super-secret-and-long-postgres-password
|
||||
DEFAULT_ORGANIZATION_NAME: Default Organization
|
||||
DEFAULT_PROJECT_NAME: Default Project
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_PUBLIC_URL: http://localhost:8000
|
||||
SUPABASE_ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0
|
||||
SUPABASE_SERVICE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU
|
||||
|
||||
studio:
|
||||
<<: *supabase-services
|
||||
profiles:
|
||||
- local
|
||||
image: supabase/studio:20240101-5cc8dea
|
||||
healthcheck:
|
||||
test: ["CMD", "node", "-e", "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
meta:
|
||||
condition: service_healthy
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "8910:3000/tcp"
|
||||
environment:
|
||||
STUDIO_PG_META_URL: http://meta:8080
|
||||
POSTGRES_PASSWORD: your-super-secret-and-long-postgres-password
|
||||
DEFAULT_ORGANIZATION_NAME: Default Organization
|
||||
DEFAULT_PROJECT_NAME: Default Project
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_PUBLIC_URL: http://localhost:8000
|
||||
SUPABASE_ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0
|
||||
SUPABASE_SERVICE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU
|
||||
|
||||
# Helper services for development
|
||||
deps:
|
||||
<<: *supabase-services
|
||||
profiles:
|
||||
- local
|
||||
image: busybox
|
||||
command: /bin/true
|
||||
depends_on:
|
||||
- kong
|
||||
- auth
|
||||
- db
|
||||
- studio
|
||||
- redis
|
||||
- rabbitmq
|
||||
- clamav
|
||||
- migrate
|
||||
|
||||
deps_backend:
|
||||
<<: *agpt-services
|
||||
profiles:
|
||||
- local
|
||||
image: busybox
|
||||
command: /bin/true
|
||||
depends_on:
|
||||
- deps
|
||||
- rest_server
|
||||
- executor
|
||||
- websocket_server
|
||||
- database_manager
|
||||
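For context on how the removed docker-compose.published.yml was meant to be driven, a rough sketch; the file name and the local profile come from the compose definition above, and the command mirrors the defaults in the removed deploy.sh:

    # Bring up the published-image stack with the development (local) profile
    docker compose -f docker-compose.published.yml --profile local up -d

The deploy.sh script removed earlier in this diff wrapped essentially this invocation with dependency checks, environment-file generation, port checks, and image pulls.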
@@ -12,23 +12,6 @@ const nextConfig = {
|
||||
"ideogram.ai", // for generated images
|
||||
"picsum.photos", // for placeholder images
|
||||
],
|
||||
remotePatterns: [
|
||||
{
|
||||
protocol: "https",
|
||||
hostname: "storage.googleapis.com",
|
||||
pathname: "/**",
|
||||
},
|
||||
{
|
||||
protocol: "https",
|
||||
hostname: "storage.cloud.google.com",
|
||||
pathname: "/**",
|
||||
},
|
||||
{
|
||||
protocol: "https",
|
||||
hostname: "lh3.googleusercontent.com",
|
||||
pathname: "/**",
|
||||
},
|
||||
],
|
||||
},
|
||||
output: "standalone",
|
||||
transpilePackages: ["geist"],
|
||||
|
||||
@@ -35,12 +35,6 @@ export default defineConfig({
|
||||
useInfiniteQueryParam: "page",
|
||||
},
|
||||
},
|
||||
"getV2List favorite library agents": {
|
||||
query: {
|
||||
useInfinite: true,
|
||||
useInfiniteQueryParam: "page",
|
||||
},
|
||||
},
|
||||
"getV1List graph executions": {
|
||||
query: {
|
||||
useInfinite: true,
|
||||
|
||||
@@ -54,8 +54,6 @@
|
||||
"@tanstack/react-query": "5.85.3",
|
||||
"@tanstack/react-table": "8.21.3",
|
||||
"@types/jaro-winkler": "0.2.4",
|
||||
"@vercel/analytics": "1.5.0",
|
||||
"@vercel/speed-insights": "1.2.0",
|
||||
"@xyflow/react": "12.8.3",
|
||||
"boring-avatars": "1.11.2",
|
||||
"class-variance-authority": "0.7.1",
|
||||
|
||||
65 autogpt_platform/frontend/pnpm-lock.yaml generated
@@ -95,12 +95,6 @@ importers:
|
||||
'@types/jaro-winkler':
|
||||
specifier: 0.2.4
|
||||
version: 0.2.4
|
||||
'@vercel/analytics':
|
||||
specifier: 1.5.0
|
||||
version: 1.5.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.55.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
|
||||
'@vercel/speed-insights':
|
||||
specifier: 1.2.0
|
||||
version: 1.2.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.55.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
|
||||
'@xyflow/react':
|
||||
specifier: 12.8.3
|
||||
version: 12.8.3(@types/react@18.3.17)(immer@10.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
@@ -3199,55 +3193,6 @@ packages:
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@vercel/analytics@1.5.0':
|
||||
resolution: {integrity: sha512-MYsBzfPki4gthY5HnYN7jgInhAZ7Ac1cYDoRWFomwGHWEX7odTEzbtg9kf/QSo7XEsEAqlQugA6gJ2WS2DEa3g==}
|
||||
peerDependencies:
|
||||
'@remix-run/react': ^2
|
||||
'@sveltejs/kit': ^1 || ^2
|
||||
next: '>= 13'
|
||||
react: ^18 || ^19 || ^19.0.0-rc
|
||||
svelte: '>= 4'
|
||||
vue: ^3
|
||||
vue-router: ^4
|
||||
peerDependenciesMeta:
|
||||
'@remix-run/react':
|
||||
optional: true
|
||||
'@sveltejs/kit':
|
||||
optional: true
|
||||
next:
|
||||
optional: true
|
||||
react:
|
||||
optional: true
|
||||
svelte:
|
||||
optional: true
|
||||
vue:
|
||||
optional: true
|
||||
vue-router:
|
||||
optional: true
|
||||
|
||||
'@vercel/speed-insights@1.2.0':
|
||||
resolution: {integrity: sha512-y9GVzrUJ2xmgtQlzFP2KhVRoCglwfRQgjyfY607aU0hh0Un6d0OUyrJkjuAlsV18qR4zfoFPs/BiIj9YDS6Wzw==}
|
||||
peerDependencies:
|
||||
'@sveltejs/kit': ^1 || ^2
|
||||
next: '>= 13'
|
||||
react: ^18 || ^19 || ^19.0.0-rc
|
||||
svelte: '>= 4'
|
||||
vue: ^3
|
||||
vue-router: ^4
|
||||
peerDependenciesMeta:
|
||||
'@sveltejs/kit':
|
||||
optional: true
|
||||
next:
|
||||
optional: true
|
||||
react:
|
||||
optional: true
|
||||
svelte:
|
||||
optional: true
|
||||
vue:
|
||||
optional: true
|
||||
vue-router:
|
||||
optional: true
|
||||
|
||||
'@vitest/expect@3.2.4':
|
||||
resolution: {integrity: sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==}
|
||||
|
||||
@@ -10714,16 +10659,6 @@ snapshots:
|
||||
'@unrs/resolver-binding-win32-x64-msvc@1.11.1':
|
||||
optional: true
|
||||
|
||||
'@vercel/analytics@1.5.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.55.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
|
||||
optionalDependencies:
|
||||
next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.55.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
react: 18.3.1
|
||||
|
||||
'@vercel/speed-insights@1.2.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.55.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
|
||||
optionalDependencies:
|
||||
next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.55.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
react: 18.3.1
|
||||
|
||||
'@vitest/expect@3.2.4':
|
||||
dependencies:
|
||||
'@types/chai': 5.2.2
|
||||
|
||||
@@ -1,127 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import React from "react";
|
||||
import { useParams } from "next/navigation";
|
||||
import { RunOutputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunOutputs";
|
||||
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
|
||||
import { Alert, AlertDescription } from "@/components/ui/alert";
|
||||
import { InfoIcon } from "lucide-react";
|
||||
import { useGetV1GetSharedExecution } from "@/app/api/__generated__/endpoints/default/default";
|
||||
|
||||
export default function SharePage() {
|
||||
const params = useParams();
|
||||
const token = params.token as string;
|
||||
|
||||
const {
|
||||
data: response,
|
||||
isLoading: loading,
|
||||
error,
|
||||
} = useGetV1GetSharedExecution(token);
|
||||
|
||||
const executionData = response?.status === 200 ? response.data : undefined;
|
||||
const is404 = !loading && !executionData;
|
||||
|
||||
if (loading) {
|
||||
return (
|
||||
<div className="flex min-h-screen items-center justify-center">
|
||||
<div className="text-center">
|
||||
<div className="mx-auto mb-4 h-12 w-12 animate-spin rounded-full border-b-2 border-primary"></div>
|
||||
<p className="text-muted-foreground">Loading shared execution...</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
if (error || is404 || !executionData) {
|
||||
return (
|
||||
<div className="flex min-h-screen items-center justify-center">
|
||||
<div className="mx-auto w-full max-w-md p-6">
|
||||
<Card className="border-dashed">
|
||||
<CardContent className="pt-6">
|
||||
<div className="space-y-4 text-center">
|
||||
<div className="mx-auto flex h-12 w-12 items-center justify-center rounded-full bg-muted">
|
||||
<InfoIcon className="h-6 w-6 text-muted-foreground" />
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<h3 className="text-lg font-semibold">
|
||||
{is404 ? "Share Link Not Found" : "Unable to Load"}
|
||||
</h3>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
{is404
|
||||
? "This shared link is invalid or has been disabled by the owner. Please check with the person who shared this link."
|
||||
: "There was an error loading this shared execution. Please try refreshing the page."}
|
||||
</p>
|
||||
</div>
|
||||
<div className="pt-2">
|
||||
<button
|
||||
onClick={() => window.location.reload()}
|
||||
className="text-sm text-primary hover:underline"
|
||||
>
|
||||
Try again
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
<div className="mt-8 text-center text-xs text-muted-foreground">
|
||||
<p>Powered by AutoGPT Platform</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="mx-auto max-w-6xl">
|
||||
<div className="mb-6">
|
||||
<Alert>
|
||||
<InfoIcon className="h-4 w-4" />
|
||||
<AlertDescription>
|
||||
This is a publicly shared agent run result. The person who shared
|
||||
this link can disable access at any time.
|
||||
</AlertDescription>
|
||||
</Alert>
|
||||
</div>
|
||||
|
||||
<Card className="mb-6">
|
||||
<CardHeader>
|
||||
<CardTitle className="text-2xl">{executionData.graph_name}</CardTitle>
|
||||
{executionData.graph_description && (
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
{executionData.graph_description}
|
||||
</p>
|
||||
)}
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<div className="grid grid-cols-2 gap-4 text-sm">
|
||||
<div>
|
||||
<span className="font-medium">Status:</span>
|
||||
<span className="ml-2 capitalize">
|
||||
{executionData.status.toLowerCase()}
|
||||
</span>
|
||||
</div>
|
||||
<div>
|
||||
<span className="font-medium">Created:</span>
|
||||
<span className="ml-2">
|
||||
{new Date(executionData.created_at).toLocaleString()}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
|
||||
<Card>
|
||||
<CardHeader>
|
||||
<CardTitle>Output</CardTitle>
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<RunOutputs outputs={executionData.outputs} />
|
||||
</CardContent>
|
||||
</Card>
|
||||
|
||||
<div className="mt-8 text-center text-sm text-muted-foreground">
|
||||
<p>Powered by AutoGPT Platform</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
import type { Metadata } from "next";
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: "Shared Agent Run - AutoGPT",
|
||||
description: "View shared agent run results",
|
||||
robots: "noindex, nofollow",
|
||||
};
|
||||
|
||||
export default function ShareLayout({
|
||||
children,
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
}) {
|
||||
return (
|
||||
<div className="min-h-screen bg-background">
|
||||
<div className="container mx-auto px-4 py-8">{children}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -5,78 +5,89 @@ import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { useAgentRunsView } from "./useAgentRunsView";
import { AgentRunsLoading } from "./components/AgentRunsLoading";
import { RunsSidebar } from "./components/RunsSidebar/RunsSidebar";
import { SelectedRunView } from "./components/SelectedRunView/SelectedRunView";
import { SelectedScheduleView } from "./components/SelectedScheduleView/SelectedScheduleView";
import React, { useMemo, useState } from "react";
import { RunDetails } from "./components/RunDetails/RunDetails";
import { ScheduleDetails } from "./components/ScheduleDetails/ScheduleDetails";
import { EmptyAgentRuns } from "./components/EmptyAgentRuns/EmptyAgentRuns";
import { Button } from "@/components/atoms/Button/Button";
import { RunAgentModal } from "./components/RunAgentModal/RunAgentModal";
import { PlusIcon } from "@phosphor-icons/react";

export function AgentRunsView() {
const {
agent,
hasAnyItems,
showSidebarLayout,
response,
ready,
error,
agentId,
selectedRun,
handleSelectRun,
handleCountsChange,
handleClearSelectedRun,
clearSelectedRun,
} = useAgentRunsView();
const [sidebarCounts, setSidebarCounts] = useState({
runsCount: 0,
schedulesCount: 0,
});

const hasAnyItems = useMemo(
() =>
(sidebarCounts.runsCount ?? 0) > 0 ||
(sidebarCounts.schedulesCount ?? 0) > 0,
[sidebarCounts],
);

if (!ready) {
return <AgentRunsLoading />;
}

if (error) {
return (
<ErrorCard
isSuccess={false}
responseError={error || undefined}
httpError={
response?.status !== 200
? {
status: response?.status,
statusText: "Request failed",
}
: undefined
}
context="agent"
onRetry={() => window.location.reload()}
/>
);
}

if (!ready || !agent) {
return <AgentRunsLoading />;
if (!response?.data || response.status !== 200) {
return (
<ErrorCard
isSuccess={false}
responseError={{ message: "No agent data found" }}
context="agent"
onRetry={() => window.location.reload()}
/>
);
}

const agent = response.data;

return (
<div
className={
showSidebarLayout
? "grid h-screen grid-cols-1 gap-0 pt-3 md:gap-4 lg:grid-cols-[25%_70%]"
: "grid h-screen grid-cols-1 gap-0 pt-3 md:gap-4"
hasAnyItems
? "grid h-screen grid-cols-1 gap-0 pt-6 md:gap-4 lg:grid-cols-[25%_70%]"
: "grid h-screen grid-cols-1 gap-0 pt-6 md:gap-4"
}
>
<div className={showSidebarLayout ? "p-4 pl-5" : "hidden p-4 pl-5"}>
<div className="mb-4">
<RunAgentModal
triggerSlot={
<Button variant="primary" size="large" className="w-full">
<PlusIcon size={20} /> New Run
</Button>
}
agent={agent}
agentId={agent.id.toString()}
onRunCreated={(execution) => handleSelectRun(execution.id)}
onScheduleCreated={(schedule) =>
handleSelectRun(`schedule:${schedule.id}`)
}
/>
</div>

<div className={hasAnyItems ? "" : "hidden"}>
<RunsSidebar
agent={agent}
selectedRunId={selectedRun}
onSelectRun={handleSelectRun}
onCountsChange={handleCountsChange}
onCountsChange={setSidebarCounts}
/>
</div>

{/* Main Content - 70% */}
<div className="p-4">
<div className={!showSidebarLayout ? "px-2" : ""}>
<div className={!hasAnyItems ? "px-2" : ""}>
<Breadcrumbs
items={[
{ name: "My Library", link: "/library" },
@@ -87,17 +98,17 @@ export function AgentRunsView() {
<div className="mt-1">
{selectedRun ? (
selectedRun.startsWith("schedule:") ? (
<SelectedScheduleView
<ScheduleDetails
agent={agent}
scheduleId={selectedRun.replace("schedule:", "")}
onClearSelectedRun={handleClearSelectedRun}
onClearSelectedRun={clearSelectedRun}
/>
) : (
<SelectedRunView
<RunDetails
agent={agent}
runId={selectedRun}
onSelectRun={handleSelectRun}
onClearSelectedRun={handleClearSelectedRun}
onClearSelectedRun={clearSelectedRun}
/>
)
) : hasAnyItems ? (

@@ -1,152 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import React, { useState } from "react";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
DropdownMenu,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuItem,
|
||||
DropdownMenuTrigger,
|
||||
} from "@/components/molecules/DropdownMenu/DropdownMenu";
|
||||
import Link from "next/link";
|
||||
import {
|
||||
FileArrowDownIcon,
|
||||
PencilSimpleIcon,
|
||||
TrashIcon,
|
||||
} from "@phosphor-icons/react";
|
||||
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { getV1GetGraphVersion } from "@/app/api/__generated__/endpoints/graphs/graphs";
|
||||
import { exportAsJSONFile } from "@/lib/utils";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useDeleteV2DeleteLibraryAgent } from "@/app/api/__generated__/endpoints/library/library";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
|
||||
interface Props {
|
||||
agent: LibraryAgent;
|
||||
}
|
||||
|
||||
export function AgentActionsDropdown({ agent }: Props) {
|
||||
const { toast } = useToast();
|
||||
const { mutateAsync: deleteAgent } = useDeleteV2DeleteLibraryAgent();
|
||||
const router = useRouter();
|
||||
const [isDeleting, setIsDeleting] = useState(false);
|
||||
const [showDeleteDialog, setShowDeleteDialog] = useState(false);
|
||||
|
||||
async function handleDelete() {
|
||||
if (!agent.id) return;
|
||||
|
||||
setIsDeleting(true);
|
||||
|
||||
try {
|
||||
await deleteAgent({ libraryAgentId: agent.id });
|
||||
toast({ title: "Agent deleted" });
|
||||
setShowDeleteDialog(false);
|
||||
router.push("/library");
|
||||
} catch (error: unknown) {
|
||||
toast({
|
||||
title: "Failed to delete agent",
|
||||
description:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
} finally {
|
||||
setIsDeleting(false);
|
||||
}
|
||||
}
|
||||
|
||||
async function handleExport() {
|
||||
try {
|
||||
const res = await getV1GetGraphVersion(
|
||||
agent.graph_id,
|
||||
agent.graph_version,
|
||||
{ for_export: true },
|
||||
);
|
||||
if (res.status === 200) {
|
||||
const filename = `${agent.name}_v${agent.graph_version}.json`;
|
||||
exportAsJSONFile(res.data as any, filename);
|
||||
toast({ title: "Agent exported" });
|
||||
} else {
|
||||
toast({ title: "Failed to export agent", variant: "destructive" });
|
||||
}
|
||||
} catch (e: any) {
|
||||
toast({
|
||||
title: "Failed to export agent",
|
||||
description: e?.message,
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="secondary" size="small" className="min-w-fit">
|
||||
•••
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end">
|
||||
<DropdownMenuItem asChild>
|
||||
<Link
|
||||
href={`/build?flowID=${agent.graph_id}&flowVersion=${agent.graph_version}`}
|
||||
target="_blank"
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
<PencilSimpleIcon size={16} /> Edit agent
|
||||
</Link>
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={handleExport}
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
<FileArrowDownIcon size={16} /> Export agent
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={() => setShowDeleteDialog(true)}
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
<TrashIcon size={16} /> Delete agent
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
|
||||
<Dialog
|
||||
controlled={{
|
||||
isOpen: showDeleteDialog,
|
||||
set: setShowDeleteDialog,
|
||||
}}
|
||||
styling={{ maxWidth: "32rem" }}
|
||||
title="Delete agent"
|
||||
>
|
||||
<Dialog.Content>
|
||||
<div>
|
||||
<Text variant="large">
|
||||
Are you sure you want to delete this agent? This action cannot be
|
||||
undone.
|
||||
</Text>
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="secondary"
|
||||
disabled={isDeleting}
|
||||
onClick={() => setShowDeleteDialog(false)}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="destructive"
|
||||
onClick={handleDelete}
|
||||
loading={isDeleting}
|
||||
>
|
||||
Delete
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -2,88 +2,57 @@
|
||||
|
||||
import React from "react";
|
||||
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
|
||||
import { toDisplayName } from "@/components/integrations/helper";
|
||||
import {
|
||||
getAgentCredentialsFields,
|
||||
getAgentInputFields,
|
||||
getCredentialTypeDisplayName,
|
||||
renderValue,
|
||||
} from "./helpers";
|
||||
|
||||
type Props = {
|
||||
agent: LibraryAgent;
|
||||
inputs?: Record<string, any> | null;
|
||||
credentialInputs?: Record<string, CredentialsMetaInput> | null;
|
||||
};
|
||||
|
||||
export function AgentInputsReadOnly({
|
||||
agent,
|
||||
inputs,
|
||||
credentialInputs,
|
||||
}: Props) {
|
||||
function getAgentInputFields(agent: LibraryAgent): Record<string, any> {
|
||||
const schema = agent.input_schema as unknown as {
|
||||
properties?: Record<string, any>;
|
||||
} | null;
|
||||
if (!schema || !schema.properties) return {};
|
||||
const properties = schema.properties as Record<string, any>;
|
||||
const visibleEntries = Object.entries(properties).filter(
|
||||
([, sub]) => !sub?.hidden,
|
||||
);
|
||||
return Object.fromEntries(visibleEntries);
|
||||
}
|
||||
|
||||
function renderValue(value: any): string {
|
||||
if (value === undefined || value === null) return "";
|
||||
if (
|
||||
typeof value === "string" ||
|
||||
typeof value === "number" ||
|
||||
typeof value === "boolean"
|
||||
)
|
||||
return String(value);
|
||||
try {
|
||||
return JSON.stringify(value, undefined, 2);
|
||||
} catch {
|
||||
return String(value);
|
||||
}
|
||||
}
|
||||
|
||||
export function AgentInputsReadOnly({ agent, inputs }: Props) {
|
||||
const fields = getAgentInputFields(agent);
|
||||
const credentialFields = getAgentCredentialsFields(agent);
|
||||
const inputEntries = Object.entries(fields);
|
||||
const credentialEntries = Object.entries(credentialFields);
|
||||
const entries = Object.entries(fields);
|
||||
|
||||
const hasInputs = inputs && inputEntries.length > 0;
|
||||
const hasCredentials = credentialInputs && credentialEntries.length > 0;
|
||||
|
||||
if (!hasInputs && !hasCredentials) {
|
||||
if (!inputs || entries.length === 0) {
|
||||
return <div className="text-neutral-600">No input for this run.</div>;
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-6">
|
||||
{/* Regular inputs */}
|
||||
{hasInputs && (
|
||||
<div className="flex flex-col gap-4">
|
||||
{inputEntries.map(([key, sub]) => (
|
||||
<div key={key} className="flex flex-col gap-1.5">
|
||||
<label className="text-sm font-medium">{sub?.title || key}</label>
|
||||
<p className="whitespace-pre-wrap break-words text-sm text-neutral-700">
|
||||
{renderValue((inputs as Record<string, any>)[key])}
|
||||
</p>
|
||||
</div>
|
||||
))}
|
||||
<div className="flex flex-col gap-4">
|
||||
{entries.map(([key, sub]) => (
|
||||
<div key={key} className="flex flex-col gap-1.5">
|
||||
<label className="text-sm font-medium">{sub?.title || key}</label>
|
||||
<p className="whitespace-pre-wrap break-words text-sm text-neutral-700">
|
||||
{renderValue((inputs as Record<string, any>)[key])}
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Credentials */}
|
||||
{hasCredentials && (
|
||||
<div className="flex flex-col gap-6">
|
||||
{hasInputs && <div className="border-t border-neutral-200 pt-4" />}
|
||||
{credentialEntries.map(([key, _sub]) => {
|
||||
const credential = credentialInputs![key];
|
||||
if (!credential) return null;
|
||||
|
||||
return (
|
||||
<div key={key} className="flex flex-col gap-4">
|
||||
<h3 className="text-lg font-medium text-neutral-900">
|
||||
{toDisplayName(credential.provider)} credentials
|
||||
</h3>
|
||||
<div className="flex flex-col gap-3">
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<span className="text-neutral-600">Name</span>
|
||||
<span className="text-neutral-600">
|
||||
{getCredentialTypeDisplayName(credential.type)}
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<span className="text-neutral-900">
|
||||
{credential.title || "Untitled"}
|
||||
</span>
|
||||
<span className="font-mono text-neutral-400">
|
||||
{"*".repeat(25)}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
))}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,54 +0,0 @@
import { CredentialsMetaResponseType } from "@/app/api/__generated__/models/credentialsMetaResponseType";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";

export function getCredentialTypeDisplayName(type: string): string {
const typeDisplayMap: Record<CredentialsMetaResponseType, string> = {
[CredentialsMetaResponseType.api_key]: "API key",
[CredentialsMetaResponseType.oauth2]: "OAuth2",
[CredentialsMetaResponseType.user_password]: "Username/Password",
[CredentialsMetaResponseType.host_scoped]: "Host-Scoped",
};

return typeDisplayMap[type as CredentialsMetaResponseType] || type;
}

export function getAgentInputFields(agent: LibraryAgent): Record<string, any> {
const schema = agent.input_schema as unknown as {
properties?: Record<string, any>;
} | null;
if (!schema || !schema.properties) return {};
const properties = schema.properties as Record<string, any>;
const visibleEntries = Object.entries(properties).filter(
([, sub]) => !sub?.hidden,
);
return Object.fromEntries(visibleEntries);
}

export function getAgentCredentialsFields(
agent: LibraryAgent,
): Record<string, any> {
if (
!agent.credentials_input_schema ||
typeof agent.credentials_input_schema !== "object" ||
!("properties" in agent.credentials_input_schema) ||
!agent.credentials_input_schema.properties
) {
return {};
}
return agent.credentials_input_schema.properties as Record<string, any>;
}

export function renderValue(value: any): string {
if (value === undefined || value === null) return "";
if (
typeof value === "string" ||
typeof value === "number" ||
typeof value === "boolean"
)
return String(value);
try {
return JSON.stringify(value, undefined, 2);
} catch {
return String(value);
}
}
@@ -4,9 +4,9 @@ import { Skeleton } from "@/components/ui/skeleton";
export function AgentRunsLoading() {
return (
<div className="px-6 py-6">
<div className="flex h-screen w-full gap-8">
<div className="flex h-screen w-full gap-4">
{/* Left Sidebar */}
<div className="w-[20vw] space-y-4">
<div className="w-80 space-y-4">
<Skeleton className="h-12 w-full" />
<Skeleton className="h-32 w-full" />
<Skeleton className="h-24 w-full" />

@@ -1,7 +1,9 @@
"use client";

import React from "react";
import React, { useState } from "react";
import { CopyIcon, CheckIcon } from "lucide-react";
import { OutputRenderer, OutputMetadata } from "../types";
import { copyToClipboard } from "../utils/copy";

interface OutputItemProps {
value: any;
@@ -16,13 +18,51 @@ export function OutputItem({
renderer,
label,
}: OutputItemProps) {
const [showCopyButton, setShowCopyButton] = useState(false);
const [copied, setCopied] = useState(false);

const handleCopy = async () => {
const copyContent = renderer.getCopyContent(value, metadata);
if (copyContent) {
try {
await copyToClipboard(copyContent);
setCopied(true);
setTimeout(() => setCopied(false), 2000);
} catch (error) {
console.error("Failed to copy:", error);
}
}
};

const canCopy = renderer.getCopyContent(value, metadata) !== null;

return (
<div className="relative">
<div
className="relative"
onMouseEnter={() => setShowCopyButton(true)}
onMouseLeave={() => setShowCopyButton(false)}
>
{label && (
<label className="mb-1.5 block text-sm font-medium">{label}</label>
)}

<div className="relative">{renderer.render(value, metadata)}</div>
<div className="relative">
{renderer.render(value, metadata)}

{canCopy && showCopyButton && (
<button
onClick={handleCopy}
className="absolute right-2 top-2 rounded-md border border-gray-200 bg-background/80 p-1.5 backdrop-blur-sm transition-all duration-200 hover:bg-gray-100"
aria-label="Copy content"
>
{copied ? (
<CheckIcon className="h-4 w-4 text-green-600" />
) : (
<CopyIcon className="h-4 w-4 text-gray-600" />
)}
</button>
)}
</div>
</div>
);
}

@@ -228,7 +228,5 @@ export function RunAgentInputs({
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="no-drag relative flex w-full">{innerInputElement}</div>
|
||||
);
|
||||
return <div className="no-drag relative flex">{innerInputElement}</div>;
|
||||
}
|
||||
|
||||
@@ -8,34 +8,28 @@ import { useAgentRunModal } from "./useAgentRunModal";
|
||||
import { ModalHeader } from "./components/ModalHeader/ModalHeader";
|
||||
import { AgentCostSection } from "./components/AgentCostSection/AgentCostSection";
|
||||
import { AgentSectionHeader } from "./components/AgentSectionHeader/AgentSectionHeader";
|
||||
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
|
||||
import { DefaultRunView } from "./components/DefaultRunView/DefaultRunView";
|
||||
import { RunAgentModalContextProvider } from "./context";
|
||||
import { ScheduleView } from "./components/ScheduleView/ScheduleView";
|
||||
import { AgentDetails } from "./components/AgentDetails/AgentDetails";
|
||||
import { RunActions } from "./components/RunActions/RunActions";
|
||||
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
|
||||
import { AlarmIcon } from "@phosphor-icons/react";
|
||||
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
|
||||
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
|
||||
import { ScheduleActions } from "./components/ScheduleActions/ScheduleActions";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { AlarmIcon, TrashIcon } from "@phosphor-icons/react";
|
||||
|
||||
interface Props {
|
||||
triggerSlot: React.ReactNode;
|
||||
agent: LibraryAgent;
|
||||
agentId: string;
|
||||
agentVersion?: number;
|
||||
onRunCreated?: (execution: GraphExecutionMeta) => void;
|
||||
onScheduleCreated?: (schedule: GraphExecutionJobInfo) => void;
|
||||
}
|
||||
|
||||
export function RunAgentModal({
|
||||
triggerSlot,
|
||||
agent,
|
||||
onRunCreated,
|
||||
onScheduleCreated,
|
||||
}: Props) {
|
||||
export function RunAgentModal({ triggerSlot, agent }: Props) {
|
||||
const {
|
||||
// UI state
|
||||
isOpen,
|
||||
setIsOpen,
|
||||
showScheduleView,
|
||||
|
||||
// Run mode
|
||||
defaultRunType,
|
||||
@@ -54,6 +48,10 @@ export function RunAgentModal({
|
||||
setPresetName,
|
||||
setPresetDescription,
|
||||
|
||||
// Scheduling
|
||||
scheduleName,
|
||||
cronExpression,
|
||||
|
||||
// Validation/readiness
|
||||
allRequiredInputsAreSet,
|
||||
|
||||
@@ -63,15 +61,19 @@ export function RunAgentModal({
|
||||
|
||||
// Async states
|
||||
isExecuting,
|
||||
isCreatingSchedule,
|
||||
isSettingUpTrigger,
|
||||
|
||||
// Actions
|
||||
handleRun,
|
||||
} = useAgentRunModal(agent, {
|
||||
onRun: onRunCreated,
|
||||
});
|
||||
handleSchedule,
|
||||
handleShowSchedule,
|
||||
handleGoBack,
|
||||
handleSetScheduleName,
|
||||
handleSetCronExpression,
|
||||
} = useAgentRunModal(agent);
|
||||
|
||||
const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false);
|
||||
const [isScheduleFormValid, setIsScheduleFormValid] = useState(true);
|
||||
|
||||
const hasAnySetupFields =
|
||||
Object.keys(agentInputFields || {}).length > 0 ||
|
||||
@@ -98,20 +100,14 @@ export function RunAgentModal({
|
||||
|
||||
function handleSetOpen(open: boolean) {
|
||||
setIsOpen(open);
|
||||
// Always reset to Run view when opening/closing
|
||||
if (open || !open) handleGoBack();
|
||||
}
|
||||
|
||||
function handleOpenScheduleModal() {
|
||||
setIsScheduleModalOpen(true);
|
||||
}
|
||||
|
||||
function handleCloseScheduleModal() {
|
||||
setIsScheduleModalOpen(false);
|
||||
}
|
||||
|
||||
function handleScheduleCreated(schedule: GraphExecutionJobInfo) {
|
||||
handleCloseScheduleModal();
|
||||
setIsOpen(false); // Close the main RunAgentModal
|
||||
onScheduleCreated?.(schedule);
|
||||
function handleRemoveSchedule() {
|
||||
handleGoBack();
|
||||
handleSetScheduleName("");
|
||||
handleSetCronExpression("");
|
||||
}
|
||||
|
||||
return (
|
||||
@@ -158,12 +154,61 @@ export function RunAgentModal({
|
||||
: "Agent Setup"
|
||||
}
|
||||
/>
|
||||
<ModalRunSection />
|
||||
<div>
|
||||
<DefaultRunView />
|
||||
</div>
|
||||
</>
|
||||
</RunAgentModalContextProvider>
|
||||
) : null}
|
||||
</div>
|
||||
|
||||
{/* Schedule Section - always visible */}
|
||||
<div className="mt-4">
|
||||
<AgentSectionHeader title="Schedule Setup" />
|
||||
{showScheduleView ? (
|
||||
<>
|
||||
<div className="my-4 flex justify-start">
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={handleRemoveSchedule}
|
||||
>
|
||||
<TrashIcon size={16} />
|
||||
Remove schedule
|
||||
</Button>
|
||||
</div>
|
||||
<ScheduleView
|
||||
scheduleName={scheduleName}
|
||||
cronExpression={cronExpression}
|
||||
recommendedScheduleCron={agent.recommended_schedule_cron}
|
||||
onScheduleNameChange={handleSetScheduleName}
|
||||
onCronExpressionChange={handleSetCronExpression}
|
||||
onValidityChange={setIsScheduleFormValid}
|
||||
/>
|
||||
</>
|
||||
) : (
|
||||
<div className="mt-2 flex flex-col items-start gap-2">
|
||||
<Text variant="body" className="mb-3 !text-zinc-500">
|
||||
No schedule configured. Create a schedule to run this
|
||||
agent automatically at a specific time.{" "}
|
||||
{agent.recommended_schedule_cron && (
|
||||
<span className="text-blue-600">
|
||||
This agent has a recommended schedule.
|
||||
</span>
|
||||
)}
|
||||
</Text>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={handleShowSchedule}
|
||||
>
|
||||
<AlarmIcon size={16} />
|
||||
Create schedule
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Agent Details Section */}
|
||||
<div className="mt-8">
|
||||
<AgentSectionHeader title="Agent Details" />
|
||||
@@ -175,33 +220,25 @@ export function RunAgentModal({
|
||||
className="fixed bottom-1 left-0 z-10 w-full bg-white p-4"
|
||||
style={{ boxShadow: "0px -8px 10px white" }}
|
||||
>
|
||||
<div className="flex items-center justify-end gap-3">
|
||||
<Button
|
||||
variant="secondary"
|
||||
onClick={handleOpenScheduleModal}
|
||||
disabled={
|
||||
isExecuting || isSettingUpTrigger || !allRequiredInputsAreSet
|
||||
{showScheduleView ? (
|
||||
<ScheduleActions
|
||||
onSchedule={handleSchedule}
|
||||
isCreatingSchedule={isCreatingSchedule}
|
||||
allRequiredInputsAreSet={
|
||||
allRequiredInputsAreSet &&
|
||||
!!scheduleName.trim() &&
|
||||
isScheduleFormValid
|
||||
}
|
||||
>
|
||||
<AlarmIcon size={16} />
|
||||
Schedule Agent
|
||||
</Button>
|
||||
/>
|
||||
) : (
|
||||
<RunActions
|
||||
defaultRunType={defaultRunType}
|
||||
onRun={handleRun}
|
||||
isExecuting={isExecuting}
|
||||
isSettingUpTrigger={isSettingUpTrigger}
|
||||
isRunReady={allRequiredInputsAreSet}
|
||||
allRequiredInputsAreSet={allRequiredInputsAreSet}
|
||||
/>
|
||||
</div>
|
||||
<ScheduleAgentModal
|
||||
isOpen={isScheduleModalOpen}
|
||||
onClose={handleCloseScheduleModal}
|
||||
agent={agent}
|
||||
inputValues={inputValues}
|
||||
inputCredentials={inputCredentials}
|
||||
onScheduleCreated={handleScheduleCreated}
|
||||
/>
|
||||
)}
|
||||
</Dialog.Footer>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
|
||||
@@ -6,7 +6,7 @@ interface Props {
|
||||
|
||||
export function AgentSectionHeader({ title }: Props) {
|
||||
return (
|
||||
<div className="border-t border-zinc-400 px-0 pb-2 pt-1">
|
||||
<div className="border-t border-zinc-400 px-0 py-2">
|
||||
<Text variant="label" className="!text-zinc-700">
|
||||
{title}
|
||||
</Text>
|
||||
|
||||
@@ -51,9 +51,9 @@ export function TimeAt({
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex items-end gap-1">
|
||||
<div className="flex items-end gap-2">
|
||||
<div className="relative">
|
||||
<label className="mb-0 block text-sm font-medium text-zinc-700">
|
||||
<label className="mb-1 block text-xs font-medium text-zinc-700">
|
||||
At
|
||||
</label>
|
||||
<div className="flex items-center gap-2">
|
||||
@@ -4,12 +4,8 @@ import SchemaTooltip from "@/components/SchemaTooltip";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
|
||||
import { useRunAgentModalContext } from "../../context";
|
||||
import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs";
|
||||
import { InfoIcon } from "@phosphor-icons/react";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { toDisplayName } from "@/components/integrations/helper";
|
||||
import { getCredentialTypeDisplayName } from "./helpers";
|
||||
|
||||
export function ModalRunSection() {
|
||||
export function DefaultRunView() {
|
||||
const {
|
||||
agent,
|
||||
defaultRunType,
|
||||
@@ -26,7 +22,7 @@ export function ModalRunSection() {
|
||||
} = useRunAgentModalContext();
|
||||
|
||||
return (
|
||||
<div className="mb-10 mt-4">
|
||||
<div className="my-4">
|
||||
{defaultRunType === "automatic-trigger" && <WebhookTriggerBanner />}
|
||||
|
||||
{/* Preset/Trigger fields */}
|
||||
@@ -65,21 +61,6 @@ export function ModalRunSection() {
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Instructions */}
|
||||
{agent.instructions && (
|
||||
<div className="mb-4 flex items-start gap-2 rounded-md border border-blue-200 bg-blue-50 p-3">
|
||||
<InfoIcon className="mt-0.5 h-4 w-4 text-blue-600" />
|
||||
<div>
|
||||
<h4 className="text-sm font-medium text-blue-900">
|
||||
How to use this agent
|
||||
</h4>
|
||||
<p className="mt-1 whitespace-pre-wrap text-sm text-blue-800">
|
||||
{agent.instructions}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Credentials inputs */}
|
||||
{Object.entries(agentCredentialsInputFields || {}).map(
|
||||
([key, inputSubSchema]) => (
|
||||
@@ -101,7 +82,7 @@ export function ModalRunSection() {
|
||||
|
||||
{/* Regular inputs */}
|
||||
{Object.entries(agentInputFields || {}).map(([key, inputSubSchema]) => (
|
||||
<div key={key} className="flex w-full flex-col gap-0 space-y-2">
|
||||
<div key={key} className="flex flex-col gap-0 space-y-2">
|
||||
<label className="flex items-center gap-1 text-sm font-medium">
|
||||
{inputSubSchema.title || key}
|
||||
<SchemaTooltip description={inputSubSchema.description} />
|
||||
@@ -116,56 +97,6 @@ export function ModalRunSection() {
|
||||
/>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{/* Selected Credentials Preview */}
|
||||
{Object.keys(inputCredentials).length > 0 && (
|
||||
<div className="mt-6 flex flex-col gap-6">
|
||||
{Object.entries(agentCredentialsInputFields || {}).map(
|
||||
([key, _sub]) => {
|
||||
const credential = inputCredentials[key];
|
||||
if (!credential) return null;
|
||||
|
||||
return (
|
||||
<div key={key} className="flex flex-col gap-4">
|
||||
<Text variant="body-medium" as="h3">
|
||||
{toDisplayName(credential.provider)} credentials
|
||||
</Text>
|
||||
<div className="flex flex-col gap-3">
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<Text
|
||||
variant="body"
|
||||
as="span"
|
||||
className="!text-neutral-600"
|
||||
>
|
||||
Name
|
||||
</Text>
|
||||
<Text
|
||||
variant="body"
|
||||
as="span"
|
||||
className="!text-neutral-600"
|
||||
>
|
||||
{getCredentialTypeDisplayName(credential.type)}
|
||||
</Text>
|
||||
</div>
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<Text
|
||||
variant="body"
|
||||
as="span"
|
||||
className="!text-neutral-900"
|
||||
>
|
||||
{credential.title || "Untitled"}
|
||||
</Text>
|
||||
<span className="font-mono text-neutral-400">
|
||||
{"*".repeat(25)}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -2,8 +2,6 @@ import { Badge } from "@/components/atoms/Badge/Badge";
|
||||
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText";
|
||||
import { ClockIcon, InfoIcon } from "@phosphor-icons/react";
|
||||
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
|
||||
|
||||
interface ModalHeaderProps {
|
||||
agent: LibraryAgent;
|
||||
@@ -11,7 +9,6 @@ interface ModalHeaderProps {
|
||||
|
||||
export function ModalHeader({ agent }: ModalHeaderProps) {
|
||||
const isUnknownCreator = agent.creator_name === "Unknown";
|
||||
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
<div className="flex items-center gap-3">
|
||||
@@ -29,30 +26,6 @@ export function ModalHeader({ agent }: ModalHeaderProps) {
|
||||
>
|
||||
{agent.description}
|
||||
</ShowMoreText>
|
||||
|
||||
{/* Schedule recommendation tip */}
|
||||
{agent.recommended_schedule_cron && !agent.has_external_trigger && (
|
||||
<div className="mt-4 flex items-center gap-2">
|
||||
<ClockIcon className="h-4 w-4 text-gray-500" />
|
||||
<p className="text-sm text-gray-600">
|
||||
<strong>Tip:</strong> For best results, run this agent{" "}
|
||||
{humanizeCronExpression(
|
||||
agent.recommended_schedule_cron,
|
||||
).toLowerCase()}
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Setup Instructions */}
|
||||
{agent.instructions && (
|
||||
<div className="mt-4 flex items-start gap-2">
|
||||
<InfoIcon className="mt-0.5 h-4 w-4 flex-shrink-0 text-gray-500" />
|
||||
<div className="text-sm text-gray-600">
|
||||
<strong>Setup Instructions:</strong>{" "}
|
||||
<span className="whitespace-pre-wrap">{agent.instructions}</span>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
import { CredentialsMetaResponseType } from "@/app/api/__generated__/models/credentialsMetaResponseType";
|
||||
|
||||
export function getCredentialTypeDisplayName(type: string): string {
|
||||
const typeDisplayMap: Record<CredentialsMetaResponseType, string> = {
|
||||
[CredentialsMetaResponseType.api_key]: "API key",
|
||||
[CredentialsMetaResponseType.oauth2]: "OAuth2",
|
||||
[CredentialsMetaResponseType.user_password]: "Username/Password",
|
||||
[CredentialsMetaResponseType.host_scoped]: "Host-Scoped",
|
||||
};
|
||||
|
||||
return typeDisplayMap[type as CredentialsMetaResponseType] || type;
|
||||
}
|
||||
@@ -6,7 +6,7 @@ interface Props {
|
||||
onRun: () => void;
|
||||
isExecuting?: boolean;
|
||||
isSettingUpTrigger?: boolean;
|
||||
isRunReady?: boolean;
|
||||
allRequiredInputsAreSet?: boolean;
|
||||
}
|
||||
|
||||
export function RunActions({
|
||||
@@ -14,14 +14,14 @@ export function RunActions({
|
||||
onRun,
|
||||
isExecuting = false,
|
||||
isSettingUpTrigger = false,
|
||||
isRunReady = true,
|
||||
allRequiredInputsAreSet = true,
|
||||
}: Props) {
|
||||
return (
|
||||
<div className="flex justify-end gap-3">
|
||||
<Button
|
||||
variant="primary"
|
||||
onClick={onRun}
|
||||
disabled={!isRunReady || isExecuting || isSettingUpTrigger}
|
||||
disabled={!allRequiredInputsAreSet || isExecuting || isSettingUpTrigger}
|
||||
loading={isExecuting || isSettingUpTrigger}
|
||||
>
|
||||
{defaultRunType === "automatic-trigger"
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
|
||||
interface Props {
|
||||
onSchedule: () => void;
|
||||
isCreatingSchedule?: boolean;
|
||||
allRequiredInputsAreSet?: boolean;
|
||||
}
|
||||
|
||||
export function ScheduleActions({
|
||||
onSchedule,
|
||||
isCreatingSchedule = false,
|
||||
allRequiredInputsAreSet = true,
|
||||
}: Props) {
|
||||
return (
|
||||
<div className="flex justify-end gap-3">
|
||||
<Button
|
||||
variant="primary"
|
||||
onClick={onSchedule}
|
||||
disabled={!allRequiredInputsAreSet || isCreatingSchedule}
|
||||
loading={isCreatingSchedule}
|
||||
>
|
||||
Schedule Agent
|
||||
</Button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -14,7 +14,7 @@ interface Props {
|
||||
onValidityChange?: (valid: boolean) => void;
|
||||
}
|
||||
|
||||
export function ModalScheduleSection({
|
||||
export function ScheduleView({
|
||||
scheduleName,
|
||||
cronExpression: _cronExpression,
|
||||
recommendedScheduleCron,
|
||||
@@ -73,8 +73,9 @@ export function ModalScheduleSection({
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<TimezoneNotice />
|
||||
<div className="mt-2 w-fit">
|
||||
<TimezoneNotice />
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,10 +1,12 @@
|
||||
import { useEffect, useMemo, useState } from "react";
|
||||
|
||||
interface Args {
|
||||
interface UseScheduleViewOptions {
|
||||
onCronExpressionChange: (expression: string) => void;
|
||||
}
|
||||
|
||||
export function useModalScheduleSection({ onCronExpressionChange }: Args) {
|
||||
export function useScheduleView({
|
||||
onCronExpressionChange,
|
||||
}: UseScheduleViewOptions) {
|
||||
const repeatOptions = useMemo(
|
||||
() => [
|
||||
{ value: "daily", label: "Daily" },
|
||||
@@ -15,7 +15,6 @@ import { usePostV2SetupTrigger } from "@/app/api/__generated__/endpoints/presets
|
||||
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
|
||||
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
|
||||
import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset";
|
||||
import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth";
|
||||
|
||||
export type RunVariant =
|
||||
| "manual"
|
||||
@@ -49,13 +48,6 @@ export function useAgentRunModal(
|
||||
agent.recommended_schedule_cron || "0 9 * * 1",
|
||||
);
|
||||
|
||||
// Get user timezone for scheduling
|
||||
const { data: userTimezone } = useGetV1GetUserTimezone({
|
||||
query: {
|
||||
select: (res) => (res.status === 200 ? res.data.timezone : undefined),
|
||||
},
|
||||
});
|
||||
|
||||
// Determine the default run type based on agent capabilities
|
||||
const defaultRunType: RunVariant = agent.has_external_trigger
|
||||
? "automatic-trigger"
|
||||
@@ -315,8 +307,6 @@ export function useAgentRunModal(
|
||||
inputs: inputValues,
|
||||
graph_version: agent.graph_version,
|
||||
credentials: inputCredentials,
|
||||
timezone:
|
||||
userTimezone && userTimezone !== "not-set" ? userTimezone : undefined,
|
||||
},
|
||||
});
|
||||
}, [
|
||||
@@ -329,7 +319,6 @@ export function useAgentRunModal(
|
||||
notifyMissingRequirements,
|
||||
createScheduleMutation,
|
||||
toast,
|
||||
userTimezone,
|
||||
]);
|
||||
|
||||
function handleShowSchedule() {
|
||||
|
||||
@@ -9,7 +9,7 @@ export function RunDetailCard({ children, className }: Props) {
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"min-h-20 rounded-large border border-white bg-white p-6",
|
||||
"min-h-20 rounded-xlarge border border-slate-50/70 bg-white p-6",
|
||||
className,
|
||||
)}
|
||||
>
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import React from "react";
|
||||
import { RunStatusBadge } from "../SelectedRunView/components/RunStatusBadge";
|
||||
import { RunStatusBadge } from "../RunDetails/components/RunStatusBadge";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
@@ -12,10 +11,7 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import moment from "moment";
|
||||
import { GraphExecution } from "@/app/api/__generated__/models/graphExecution";
|
||||
import { useRunDetailHeader } from "./useRunDetailHeader";
|
||||
import { AgentActionsDropdown } from "../AgentActionsDropdown";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { ShareButton } from "@/components/ShareButton";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import { AgentActions } from "./components/AgentActions";
|
||||
|
||||
type Props = {
|
||||
agent: LibraryAgent;
|
||||
@@ -32,22 +28,16 @@ export function RunDetailHeader({
|
||||
onSelectRun,
|
||||
onClearSelectedRun,
|
||||
}: Props) {
|
||||
const shareExecutionResultsEnabled = useGetFlag(Flag.SHARE_EXECUTION_RESULTS);
|
||||
|
||||
const {
|
||||
stopRun,
|
||||
canStop,
|
||||
isStopping,
|
||||
deleteRun,
|
||||
isDeleting,
|
||||
isRunning,
|
||||
runAgain,
|
||||
isRunningAgain,
|
||||
openInBuilderHref,
|
||||
showDeleteDialog,
|
||||
handleStopRun,
|
||||
handleRunAgain,
|
||||
handleDeleteRun,
|
||||
handleShowDeleteDialog,
|
||||
} = useRunDetailHeader(agent.graph_id, run, onSelectRun, onClearSelectedRun);
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div className="flex w-full items-center justify-between">
|
||||
@@ -67,28 +57,19 @@ export function RunDetailHeader({
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={handleRunAgain}
|
||||
onClick={runAgain}
|
||||
loading={isRunningAgain}
|
||||
>
|
||||
<PlayIcon size={16} /> Run again
|
||||
</Button>
|
||||
{shareExecutionResultsEnabled && (
|
||||
<ShareButton
|
||||
graphId={agent.graph_id}
|
||||
executionId={run.id}
|
||||
isShared={run.is_shared}
|
||||
shareToken={run.share_token}
|
||||
/>
|
||||
)}
|
||||
{!isRunning ? (
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={() => handleShowDeleteDialog(true)}
|
||||
>
|
||||
<TrashIcon size={16} /> Delete run
|
||||
</Button>
|
||||
) : null}
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={deleteRun}
|
||||
loading={isDeleting}
|
||||
>
|
||||
<TrashIcon size={16} /> Delete run
|
||||
</Button>
|
||||
{openInBuilderHref ? (
|
||||
<Button
|
||||
variant="secondary"
|
||||
@@ -97,20 +78,20 @@ export function RunDetailHeader({
|
||||
href={openInBuilderHref}
|
||||
target="_blank"
|
||||
>
|
||||
<ArrowSquareOutIcon size={16} /> Edit run
|
||||
<ArrowSquareOutIcon size={16} /> Open in builder
|
||||
</Button>
|
||||
) : null}
|
||||
{canStop ? (
|
||||
<Button
|
||||
variant="destructive"
|
||||
size="small"
|
||||
onClick={handleStopRun}
|
||||
onClick={stopRun}
|
||||
disabled={isStopping}
|
||||
>
|
||||
<StopIcon size={14} /> Stop agent
|
||||
<StopIcon size={14} /> Stop run
|
||||
</Button>
|
||||
) : null}
|
||||
<AgentActionsDropdown agent={agent} />
|
||||
<AgentActions agent={agent} />
|
||||
</div>
|
||||
) : null}
|
||||
</div>
|
||||
@@ -164,40 +145,6 @@ export function RunDetailHeader({
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<Dialog
|
||||
controlled={{
|
||||
isOpen: showDeleteDialog,
|
||||
set: handleShowDeleteDialog,
|
||||
}}
|
||||
styling={{ maxWidth: "32rem" }}
|
||||
title="Delete run"
|
||||
>
|
||||
<Dialog.Content>
|
||||
<div>
|
||||
<Text variant="large">
|
||||
Are you sure you want to delete this run? This action cannot be
|
||||
undone.
|
||||
</Text>
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="secondary"
|
||||
disabled={isDeleting}
|
||||
onClick={() => handleShowDeleteDialog(false)}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="destructive"
|
||||
onClick={handleDeleteRun}
|
||||
loading={isDeleting}
|
||||
>
|
||||
Delete
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,111 @@
|
||||
"use client";
|
||||
|
||||
import React, { useCallback } from "react";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
DropdownMenu,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuItem,
|
||||
DropdownMenuTrigger,
|
||||
} from "@/components/molecules/DropdownMenu/DropdownMenu";
|
||||
import Link from "next/link";
|
||||
import {
|
||||
FileArrowDownIcon,
|
||||
PencilSimpleIcon,
|
||||
TrashIcon,
|
||||
} from "@phosphor-icons/react";
|
||||
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useDeleteV2DeleteLibraryAgent } from "@/app/api/__generated__/endpoints/library/library";
|
||||
import { getV1GetGraphVersion } from "@/app/api/__generated__/endpoints/graphs/graphs";
|
||||
import { exportAsJSONFile } from "@/lib/utils";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
|
||||
interface AgentActionsProps {
|
||||
agent: LibraryAgent;
|
||||
}
|
||||
|
||||
export function AgentActions({ agent }: AgentActionsProps) {
|
||||
const router = useRouter();
|
||||
const { toast } = useToast();
|
||||
const deleteMutation = useDeleteV2DeleteLibraryAgent();
|
||||
|
||||
const handleExport = useCallback(async () => {
|
||||
try {
|
||||
const res = await getV1GetGraphVersion(
|
||||
agent.graph_id,
|
||||
agent.graph_version,
|
||||
{ for_export: true },
|
||||
);
|
||||
if (res.status === 200) {
|
||||
const filename = `${agent.name}_v${agent.graph_version}.json`;
|
||||
exportAsJSONFile(res.data as any, filename);
|
||||
toast({ title: "Agent exported" });
|
||||
} else {
|
||||
toast({ title: "Failed to export agent", variant: "destructive" });
|
||||
}
|
||||
} catch (e: any) {
|
||||
toast({
|
||||
title: "Failed to export agent",
|
||||
description: e?.message,
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
}, [agent.graph_id, agent.graph_version, agent.name, toast]);
|
||||
|
||||
const handleDelete = useCallback(() => {
|
||||
if (!agent?.id) return;
|
||||
const confirmed = window.confirm(
|
||||
"Are you sure you want to delete this agent? This action cannot be undone.",
|
||||
);
|
||||
if (!confirmed) return;
|
||||
deleteMutation.mutate(
|
||||
{ libraryAgentId: agent.id },
|
||||
{
|
||||
onSuccess: () => {
|
||||
toast({ title: "Agent deleted" });
|
||||
router.push("/library");
|
||||
},
|
||||
onError: (error: any) =>
|
||||
toast({
|
||||
title: "Failed to delete agent",
|
||||
description: error?.message,
|
||||
variant: "destructive",
|
||||
}),
|
||||
},
|
||||
);
|
||||
}, [agent.id, deleteMutation, router, toast]);
|
||||
|
||||
return (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="secondary" size="small">
|
||||
•••
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end">
|
||||
<DropdownMenuItem asChild>
|
||||
<Link
|
||||
href={`/build?flowID=${agent.graph_id}&flowVersion=${agent.graph_version}`}
|
||||
target="_blank"
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
<PencilSimpleIcon size={16} /> Edit agent
|
||||
</Link>
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={handleExport}
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
<FileArrowDownIcon size={16} /> Export agent to file
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={handleDelete}
|
||||
className="flex items-center gap-2"
|
||||
>
|
||||
<TrashIcon size={16} /> Delete agent
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
);
|
||||
}
|
||||
@@ -9,7 +9,6 @@ import {
|
||||
import { useDeleteV1DeleteGraphExecution } from "@/app/api/__generated__/endpoints/graphs/graphs";
|
||||
import { usePostV1ExecuteGraphAgent } from "@/app/api/__generated__/endpoints/graphs/graphs";
|
||||
import type { GraphExecution } from "@/app/api/__generated__/models/graphExecution";
|
||||
import { useState } from "react";
|
||||
|
||||
export function useRunDetailHeader(
|
||||
agentGraphId: string,
|
||||
@@ -20,117 +19,92 @@ export function useRunDetailHeader(
|
||||
const queryClient = useQueryClient();
|
||||
const { toast } = useToast();
|
||||
|
||||
const [showDeleteDialog, setShowDeleteDialog] = useState(false);
|
||||
const stopMutation = usePostV1StopGraphExecution({
|
||||
mutation: {
|
||||
onSuccess: () => {
|
||||
toast({ title: "Run stopped" });
|
||||
queryClient.invalidateQueries({
|
||||
queryKey:
|
||||
getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId)
|
||||
.queryKey,
|
||||
});
|
||||
},
|
||||
onError: (error: any) => {
|
||||
toast({
|
||||
title: "Failed to stop run",
|
||||
description: error?.message || "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
function stopRun() {
|
||||
if (!run) return;
|
||||
stopMutation.mutate({ graphId: run.graph_id, graphExecId: run.id });
|
||||
}
|
||||
|
||||
const canStop = run?.status === "RUNNING" || run?.status === "QUEUED";
|
||||
|
||||
const { mutateAsync: stopRun, isPending: isStopping } =
|
||||
usePostV1StopGraphExecution();
|
||||
// Delete run
|
||||
const deleteMutation = useDeleteV1DeleteGraphExecution({
|
||||
mutation: {
|
||||
onSuccess: () => {
|
||||
toast({ title: "Run deleted" });
|
||||
queryClient.invalidateQueries({
|
||||
queryKey:
|
||||
getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId)
|
||||
.queryKey,
|
||||
});
|
||||
if (onClearSelectedRun) onClearSelectedRun();
|
||||
},
|
||||
onError: (error: any) =>
|
||||
toast({
|
||||
title: "Failed to delete run",
|
||||
description: error?.message || "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
}),
|
||||
},
|
||||
});
|
||||
|
||||
const { mutateAsync: deleteRun, isPending: isDeleting } =
|
||||
useDeleteV1DeleteGraphExecution();
|
||||
|
||||
const { mutateAsync: executeRun, isPending: isRunningAgain } =
|
||||
usePostV1ExecuteGraphAgent();
|
||||
|
||||
async function handleDeleteRun() {
|
||||
try {
|
||||
await deleteRun({ graphExecId: run?.id ?? "" });
|
||||
|
||||
toast({ title: "Run deleted" });
|
||||
|
||||
await queryClient.refetchQueries({
|
||||
queryKey:
|
||||
getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId)
|
||||
.queryKey,
|
||||
});
|
||||
|
||||
if (onClearSelectedRun) onClearSelectedRun();
|
||||
|
||||
setShowDeleteDialog(false);
|
||||
} catch (error: unknown) {
|
||||
toast({
|
||||
title: "Failed to delete run",
|
||||
description:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
function deleteRun() {
|
||||
if (!run) return;
|
||||
deleteMutation.mutate({ graphExecId: run.id });
|
||||
}
|
||||
|
||||
async function handleStopRun() {
|
||||
try {
|
||||
await stopRun({
|
||||
graphId: run?.graph_id ?? "",
|
||||
graphExecId: run?.id ?? "",
|
||||
});
|
||||
// Run again (execute agent with previous inputs/credentials)
|
||||
const executeMutation = usePostV1ExecuteGraphAgent({
|
||||
mutation: {
|
||||
onSuccess: async (res) => {
|
||||
toast({ title: "Run started" });
|
||||
const newRunId = res?.status === 200 ? (res?.data?.id ?? "") : "";
|
||||
|
||||
toast({ title: "Run stopped" });
|
||||
await queryClient.invalidateQueries({
|
||||
queryKey:
|
||||
getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId)
|
||||
.queryKey,
|
||||
});
|
||||
if (newRunId && onSelectRun) onSelectRun(newRunId);
|
||||
},
|
||||
onError: (error: any) =>
|
||||
toast({
|
||||
title: "Failed to start run",
|
||||
description: error?.message || "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
}),
|
||||
},
|
||||
});
|
||||
|
||||
await queryClient.invalidateQueries({
|
||||
queryKey:
|
||||
getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId)
|
||||
.queryKey,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
toast({
|
||||
title: "Failed to stop run",
|
||||
description:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async function handleRunAgain() {
|
||||
if (!run) {
|
||||
toast({
|
||||
title: "Run not found",
|
||||
description: "Run not found",
|
||||
variant: "destructive",
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
toast({ title: "Run started" });
|
||||
|
||||
const res = await executeRun({
|
||||
graphId: run.graph_id,
|
||||
graphVersion: run.graph_version,
|
||||
data: {
|
||||
inputs: (run as any).inputs || {},
|
||||
credentials_inputs: (run as any).credential_inputs || {},
|
||||
},
|
||||
});
|
||||
|
||||
const newRunId = res?.status === 200 ? (res?.data?.id ?? "") : "";
|
||||
|
||||
await queryClient.invalidateQueries({
|
||||
queryKey:
|
||||
getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId)
|
||||
.queryKey,
|
||||
});
|
||||
|
||||
if (newRunId && onSelectRun) onSelectRun(newRunId);
|
||||
} catch (error: unknown) {
|
||||
toast({
|
||||
title: "Failed to start run",
|
||||
description:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function handleShowDeleteDialog(open: boolean) {
|
||||
setShowDeleteDialog(open);
|
||||
function runAgain() {
|
||||
if (!run) return;
|
||||
executeMutation.mutate({
|
||||
graphId: run.graph_id,
|
||||
graphVersion: run.graph_version,
|
||||
data: {
|
||||
inputs: (run as any).inputs || {},
|
||||
credentials_inputs: (run as any).credential_inputs || {},
|
||||
},
|
||||
} as any);
|
||||
}
|
||||
|
||||
// Open in builder URL helper
|
||||
@@ -139,16 +113,13 @@ export function useRunDetailHeader(
|
||||
: undefined;
|
||||
|
||||
return {
|
||||
openInBuilderHref,
|
||||
showDeleteDialog,
|
||||
stopRun,
|
||||
canStop,
|
||||
isStopping,
|
||||
isDeleting,
|
||||
isRunning: run?.status === "RUNNING",
|
||||
isRunningAgain,
|
||||
handleShowDeleteDialog,
|
||||
handleDeleteRun,
|
||||
handleStopRun,
|
||||
handleRunAgain,
|
||||
isStopping: stopMutation.isPending,
|
||||
deleteRun,
|
||||
isDeleting: deleteMutation.isPending,
|
||||
runAgain,
|
||||
isRunningAgain: executeMutation.isPending,
|
||||
openInBuilderHref,
|
||||
} as const;
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import {
|
||||
TabsLineList,
|
||||
TabsLineTrigger,
|
||||
} from "@/components/molecules/TabsLine/TabsLine";
|
||||
import { useSelectedRunView } from "./useSelectedRunView";
|
||||
import { useRunDetails } from "./useRunDetails";
|
||||
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader";
|
||||
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
|
||||
@@ -16,20 +16,20 @@ import { AgentInputsReadOnly } from "../AgentInputsReadOnly/AgentInputsReadOnly"
|
||||
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
|
||||
import { RunOutputs } from "./components/RunOutputs";
|
||||
|
||||
interface Props {
|
||||
interface RunDetailsProps {
|
||||
agent: LibraryAgent;
|
||||
runId: string;
|
||||
onSelectRun?: (id: string) => void;
|
||||
onClearSelectedRun?: () => void;
|
||||
}
|
||||
|
||||
export function SelectedRunView({
|
||||
export function RunDetails({
|
||||
agent,
|
||||
runId,
|
||||
onSelectRun,
|
||||
onClearSelectedRun,
|
||||
}: Props) {
|
||||
const { run, isLoading, responseError, httpError } = useSelectedRunView(
|
||||
}: RunDetailsProps) {
|
||||
const { run, isLoading, responseError, httpError } = useRunDetails(
|
||||
agent.graph_id,
|
||||
runId,
|
||||
);
|
||||
@@ -85,11 +85,7 @@ export function SelectedRunView({
|
||||
|
||||
<TabsLineContent value="input">
|
||||
<RunDetailCard>
|
||||
<AgentInputsReadOnly
|
||||
agent={agent}
|
||||
inputs={(run as any)?.inputs}
|
||||
credentialInputs={(run as any)?.credential_inputs}
|
||||
/>
|
||||
<AgentInputsReadOnly agent={agent} inputs={(run as any)?.inputs} />
|
||||
</RunDetailCard>
|
||||
</TabsLineContent>
|
||||
</TabsLine>
|
||||
@@ -4,7 +4,7 @@ import { useGetV1GetExecutionDetails } from "@/app/api/__generated__/endpoints/g
|
||||
import type { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200";
|
||||
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
|
||||
|
||||
export function useSelectedRunView(graphId: string, runId: string) {
|
||||
export function useRunDetails(graphId: string, runId: string) {
|
||||
const query = useGetV1GetExecutionDetails(graphId, runId, {
|
||||
query: {
|
||||
refetchInterval: (q) => {
|
||||
@@ -1,12 +1,15 @@
|
||||
"use client";
|
||||
|
||||
import React from "react";
|
||||
import React, { useEffect } from "react";
|
||||
import {
|
||||
TabsLine,
|
||||
TabsLineList,
|
||||
TabsLineTrigger,
|
||||
TabsLineContent,
|
||||
} from "@/components/molecules/TabsLine/TabsLine";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { PlusIcon } from "@phosphor-icons/react/dist/ssr";
|
||||
import { RunAgentModal } from "../RunAgentModal/RunAgentModal";
|
||||
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { useRunsSidebar } from "./useRunsSidebar";
|
||||
import { RunListItem } from "./components/RunListItem";
|
||||
@@ -23,7 +26,6 @@ interface RunsSidebarProps {
|
||||
onCountsChange?: (info: {
|
||||
runsCount: number;
|
||||
schedulesCount: number;
|
||||
loading?: boolean;
|
||||
}) => void;
|
||||
}
|
||||
|
||||
@@ -45,11 +47,11 @@ export function RunsSidebar({
|
||||
isFetchingMoreRuns,
|
||||
tabValue,
|
||||
setTabValue,
|
||||
} = useRunsSidebar({
graphId: agent.graph_id,
onSelectRun,
onCountsChange,
});
} = useRunsSidebar({ graphId: agent.graph_id, onSelectRun });

useEffect(() => {
if (onCountsChange) onCountsChange({ runsCount, schedulesCount });
}, [runsCount, schedulesCount, onCountsChange]);

if (error) {
return <ErrorCard responseError={error} />;
@@ -57,7 +59,7 @@ export function RunsSidebar({

if (loading) {
return (
<div className="ml-6 w-[20vw] space-y-4">
<div className="ml-6 w-80 space-y-4">
<Skeleton className="h-12 w-full" />
<Skeleton className="h-32 w-full" />
<Skeleton className="h-24 w-full" />
@@ -66,64 +68,77 @@ export function RunsSidebar({
}

return (
<TabsLine
value={tabValue}
onValueChange={(v) => {
const value = v as "runs" | "scheduled";
setTabValue(value);
if (value === "runs") {
if (runs && runs.length) onSelectRun(runs[0].id);
} else {
if (schedules && schedules.length)
onSelectRun(`schedule:${schedules[0].id}`);
<div className="min-w-0 bg-gray-50 p-4 pl-5">
<RunAgentModal
triggerSlot={
<Button variant="primary" size="large" className="w-full">
<PlusIcon size={20} /> New Run
</Button>
}
}}
className="min-w-0 overflow-hidden"
>
<TabsLineList>
<TabsLineTrigger value="runs">
Runs <span className="ml-3 inline-block">{runsCount}</span>
</TabsLineTrigger>
<TabsLineTrigger value="scheduled">
Scheduled <span className="ml-3 inline-block">{schedulesCount}</span>
</TabsLineTrigger>
</TabsLineList>
agent={agent}
agentId={agent.id.toString()}
/>

<>
<TabsLineContent value="runs">
<InfiniteList
items={runs}
hasMore={!!hasMoreRuns}
isFetchingMore={isFetchingMoreRuns}
onEndReached={fetchMoreRuns}
className="flex flex-nowrap items-center justify-start gap-4 overflow-x-scroll px-1 pb-4 pt-1 lg:flex-col lg:gap-3 lg:overflow-x-hidden"
itemWrapperClassName="w-auto lg:w-full"
renderItem={(run) => (
<div className="w-[15rem] lg:w-full">
<RunListItem
run={run}
title={agent.name}
selected={selectedRunId === run.id}
onClick={() => onSelectRun && onSelectRun(run.id)}
/>
</div>
)}
/>
</TabsLineContent>
<TabsLineContent value="scheduled">
<div className="flex flex-nowrap items-center justify-start gap-4 overflow-x-scroll px-1 pb-4 pt-1 lg:flex-col lg:gap-3 lg:overflow-x-hidden">
{schedules.map((s: GraphExecutionJobInfo) => (
<div className="w-[15rem] lg:w-full" key={s.id}>
<ScheduleListItem
schedule={s}
selected={selectedRunId === `schedule:${s.id}`}
onClick={() => onSelectRun(`schedule:${s.id}`)}
/>
</div>
))}
</div>
</TabsLineContent>
</>
</TabsLine>
<TabsLine
value={tabValue}
onValueChange={(v) => {
const value = v as "runs" | "scheduled";
setTabValue(value);
if (value === "runs") {
if (runs && runs.length) onSelectRun(runs[0].id);
} else {
if (schedules && schedules.length)
onSelectRun(`schedule:${schedules[0].id}`);
}
}}
className="mt-6 min-w-0 overflow-hidden"
>
<TabsLineList>
<TabsLineTrigger value="runs">
Runs <span className="ml-3 inline-block">{runsCount}</span>
</TabsLineTrigger>
<TabsLineTrigger value="scheduled">
Scheduled{" "}
<span className="ml-3 inline-block">{schedulesCount}</span>
</TabsLineTrigger>
</TabsLineList>

<>
<TabsLineContent value="runs">
<InfiniteList
items={runs}
hasMore={!!hasMoreRuns}
isFetchingMore={isFetchingMoreRuns}
onEndReached={fetchMoreRuns}
className="flex flex-nowrap items-center justify-start gap-4 overflow-x-scroll px-1 pb-4 pt-1 lg:flex-col lg:gap-1 lg:overflow-x-hidden"
itemWrapperClassName="w-auto lg:w-full"
renderItem={(run) => (
<div className="mb-3 w-[15rem] lg:w-full">
<RunListItem
run={run}
title={agent.name}
selected={selectedRunId === run.id}
onClick={() => onSelectRun && onSelectRun(run.id)}
/>
</div>
)}
/>
</TabsLineContent>
<TabsLineContent value="scheduled">
<div className="flex flex-nowrap items-center justify-start gap-4 overflow-x-scroll px-1 pb-4 pt-1 lg:flex-col lg:gap-1 lg:overflow-x-hidden">
{schedules.map((s: GraphExecutionJobInfo) => (
<div className="mb-3 w-[15rem] lg:w-full" key={s.id}>
<ScheduleListItem
schedule={s}
selected={selectedRunId === `schedule:${s.id}`}
onClick={() => onSelectRun(`schedule:${s.id}`)}
/>
</div>
))}
</div>
</TabsLineContent>
</>
</TabsLine>
</div>
);
}

@@ -23,7 +23,7 @@ export function RunSidebarCard({
<button
className={cn(
"w-full rounded-large border border-slate-50/70 bg-white p-3 text-left transition-all duration-150 hover:scale-[1.01] hover:bg-slate-50/50",
selected ? "large ring-2 ring-slate-800" : undefined,
selected ? "ring-2 ring-slate-800" : undefined,
)}
onClick={onClick}
>

@@ -1,66 +0,0 @@
import type { GraphExecutionsPaginated } from "@/app/api/__generated__/models/graphExecutionsPaginated";
import type { InfiniteData } from "@tanstack/react-query";

const AGENT_RUNNING_POLL_INTERVAL = 1500;

function hasValidExecutionsData(
page: unknown,
): page is { data: GraphExecutionsPaginated } {
return (
typeof page === "object" &&
page !== null &&
"data" in page &&
typeof (page as { data: unknown }).data === "object" &&
(page as { data: unknown }).data !== null &&
"executions" in (page as { data: GraphExecutionsPaginated }).data
);
}

export function getRunsPollingInterval(
pages: Array<unknown> | undefined,
isRunsTab: boolean,
): number | false {
if (!isRunsTab || !pages?.length) return false;

try {
const executions = pages.flatMap((page) => {
if (!hasValidExecutionsData(page)) return [];
return page.data.executions || [];
});
const hasActive = executions.some(
(e) => e.status === "RUNNING" || e.status === "QUEUED",
);
return hasActive ? AGENT_RUNNING_POLL_INTERVAL : false;
} catch {
return false;
}
}

export function computeRunsCount(
infiniteData: InfiniteData<unknown> | undefined,
runsLength: number,
): number {
const lastPage = infiniteData?.pages.at(-1);
if (!hasValidExecutionsData(lastPage)) return runsLength;
return lastPage.data.pagination?.total_items || runsLength;
}

export function getNextRunsPageParam(lastPage: unknown): number | undefined {
if (!hasValidExecutionsData(lastPage)) return undefined;

const { pagination } = lastPage.data;
const hasMore =
pagination.current_page * pagination.page_size < pagination.total_items;
return hasMore ? pagination.current_page + 1 : undefined;
}

export function extractRunsFromPages(
infiniteData: InfiniteData<unknown> | undefined,
) {
return (
infiniteData?.pages.flatMap((page) => {
if (!hasValidExecutionsData(page)) return [];
return page.data.executions || [];
}) || []
);
}
@@ -4,27 +4,18 @@ import { useEffect, useMemo, useState } from "react";

import { useGetV1ListGraphExecutionsInfinite } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { useGetV1ListExecutionSchedulesForAGraph } from "@/app/api/__generated__/endpoints/schedules/schedules";
import { GraphExecutionsPaginated } from "@/app/api/__generated__/models/graphExecutionsPaginated";
import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { useSearchParams } from "next/navigation";
import { okData } from "@/app/api/helpers";
import {
getRunsPollingInterval,
computeRunsCount,
getNextRunsPageParam,
extractRunsFromPages,
} from "./helpers";

const AGENT_RUNNING_POLL_INTERVAL = 1500;

type Args = {
graphId?: string;
onSelectRun: (runId: string) => void;
onCountsChange?: (info: {
runsCount: number;
schedulesCount: number;
loading?: boolean;
}) => void;
};

export function useRunsSidebar({ graphId, onSelectRun, onCountsChange }: Args) {
export function useRunsSidebar({ graphId, onSelectRun }: Args) {
const params = useSearchParams();
const existingRunId = params.get("executionId") as string | undefined;
const [tabValue, setTabValue] = useState<"runs" | "scheduled">("runs");
@@ -35,11 +26,38 @@ export function useRunsSidebar({ graphId, onSelectRun, onCountsChange }: Args) {
{
query: {
enabled: !!graphId,
refetchInterval: (q) =>
getRunsPollingInterval(q.state.data?.pages, tabValue === "runs"),
// Lightweight polling so statuses refresh; only poll if any run is active
refetchInterval: (q) => {
if (tabValue !== "runs") return false;
const pages = q.state.data?.pages as
| Array<{ data: unknown }>
| undefined;
if (!pages || pages.length === 0) return false;
try {
const executions = pages.flatMap((p) => {
const response = p.data as GraphExecutionsPaginated;
return response.executions || [];
});
const hasActive = executions.some(
(e: { status?: string }) =>
e.status === "RUNNING" || e.status === "QUEUED",
);
return hasActive ? AGENT_RUNNING_POLL_INTERVAL : false;
} catch {
return false;
}
},
refetchIntervalInBackground: true,
refetchOnWindowFocus: false,
getNextPageParam: getNextRunsPageParam,
getNextPageParam: (lastPage) => {
const pagination = (lastPage.data as GraphExecutionsPaginated)
.pagination;
const hasMore =
pagination.current_page * pagination.page_size <
pagination.total_items;

return hasMore ? pagination.current_page + 1 : undefined;
},
},
},
);
@@ -47,31 +65,19 @@ export function useRunsSidebar({ graphId, onSelectRun, onCountsChange }: Args) {
const schedulesQuery = useGetV1ListExecutionSchedulesForAGraph(
graphId || "",
{
query: {
enabled: !!graphId,
select: (r) => okData<GraphExecutionJobInfo[]>(r) ?? [],
},
query: { enabled: !!graphId },
},
);

const runs = useMemo(
() => extractRunsFromPages(runsQuery.data),
() =>
runsQuery.data?.pages.flatMap((p) => {
const response = p.data as GraphExecutionsPaginated;
return response.executions;
}) || [],
[runsQuery.data],
);

const schedules = schedulesQuery.data || [];

const runsCount = computeRunsCount(runsQuery.data, runs.length);
const schedulesCount = schedules.length;
const loading = !schedulesQuery.isSuccess || !runsQuery.isSuccess;

// Notify parent about counts and loading state
useEffect(() => {
if (onCountsChange) {
onCountsChange({ runsCount, schedulesCount, loading });
}
}, [runsCount, schedulesCount, loading, onCountsChange]);

useEffect(() => {
if (runs.length > 0) {
if (existingRunId) {
@@ -88,6 +94,9 @@ export function useRunsSidebar({ graphId, onSelectRun, onCountsChange }: Args) {
else setTabValue("runs");
}, [existingRunId]);

const schedules: GraphExecutionJobInfo[] =
schedulesQuery.data?.status === 200 ? schedulesQuery.data.data : [];

// If there are no runs but there are schedules, and nothing is selected, auto-select the first schedule
useEffect(() => {
if (!existingRunId && runs.length === 0 && schedules.length > 0)
@@ -98,12 +107,17 @@ export function useRunsSidebar({ graphId, onSelectRun, onCountsChange }: Args) {
runs,
schedules,
error: schedulesQuery.error || runsQuery.error,
loading,
loading: !schedulesQuery.isSuccess || !runsQuery.isSuccess,
runsQuery,
tabValue,
setTabValue,
runsCount,
schedulesCount,
runsCount:
(
runsQuery.data?.pages.at(-1)?.data as
| GraphExecutionsPaginated
| undefined
)?.pagination.total_items || runs.length,
schedulesCount: schedules.length,
fetchMoreRuns: runsQuery.fetchNextPage,
hasMoreRuns: runsQuery.hasNextPage,
isFetchingMoreRuns: runsQuery.isFetchingNextPage,

@@ -1,110 +0,0 @@
"use client";

import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { Button } from "@/components/atoms/Button/Button";
import { useState } from "react";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { ModalScheduleSection } from "./components/ModalScheduleSection/ModalScheduleSection";
import { Text } from "@/components/atoms/Text/Text";
import { useScheduleAgentModal } from "./useScheduleAgentModal";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";

interface Props {
isOpen: boolean;
onClose: () => void;
agent: LibraryAgent;
inputValues: Record<string, any>;
inputCredentials: Record<string, any>;
onScheduleCreated?: (schedule: GraphExecutionJobInfo) => void;
}

export function ScheduleAgentModal({
isOpen,
onClose,
agent,
inputValues,
inputCredentials,
onScheduleCreated,
}: Props) {
const [isScheduleFormValid, setIsScheduleFormValid] = useState(true);

const {
scheduleName,
cronExpression,
isCreatingSchedule,
handleSchedule,
handleSetScheduleName,
handleSetCronExpression,
resetForm,
} = useScheduleAgentModal(agent, inputValues, inputCredentials, {
onCreateSchedule: (schedule) => {
onScheduleCreated?.(schedule);
},
onClose: onClose,
});

function handleClose() {
resetForm();
setIsScheduleFormValid(true);
onClose();
}

async function handleScheduleClick() {
if (!scheduleName.trim() || !isScheduleFormValid) return;

try {
await handleSchedule(scheduleName, cronExpression);
} catch (error) {
// Error handling is done in the hook
console.error("Failed to create schedule:", error);
}
}

const canSchedule = scheduleName.trim() && isScheduleFormValid;

return (
<Dialog
controlled={{ isOpen, set: handleClose }}
styling={{ maxWidth: "600px", maxHeight: "90vh" }}
>
<Dialog.Content>
<div className="flex h-full flex-col">
<Text variant="lead" as="h2" className="!font-medium !text-black">
Schedule run
</Text>

{/* Content */}
<div className="overflow-y-auto">
<ModalScheduleSection
scheduleName={scheduleName}
cronExpression={cronExpression}
recommendedScheduleCron={agent.recommended_schedule_cron}
onScheduleNameChange={handleSetScheduleName}
onCronExpressionChange={handleSetCronExpression}
onValidityChange={setIsScheduleFormValid}
/>
</div>

{/* Footer */}
<div className="flex items-center justify-end gap-3 pt-6">
<Button
variant="secondary"
onClick={handleClose}
disabled={isCreatingSchedule}
>
Cancel
</Button>
<Button
variant="primary"
onClick={handleScheduleClick}
loading={isCreatingSchedule}
disabled={!canSchedule}
>
Schedule
</Button>
</div>
</div>
</Dialog.Content>
</Dialog>
);
}
@@ -1,128 +0,0 @@
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { useState, useCallback, useMemo } from "react";
import { useQueryClient } from "@tanstack/react-query";
import { useToast } from "@/components/molecules/Toast/use-toast";
import {
usePostV1CreateExecutionSchedule as useCreateSchedule,
getGetV1ListExecutionSchedulesForAGraphQueryKey,
} from "@/app/api/__generated__/endpoints/schedules/schedules";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";

interface UseScheduleAgentModalCallbacks {
onCreateSchedule?: (schedule: GraphExecutionJobInfo) => void;
onClose?: () => void;
}

export function useScheduleAgentModal(
agent: LibraryAgent,
inputValues: Record<string, any>,
inputCredentials: Record<string, any>,
callbacks?: UseScheduleAgentModalCallbacks,
) {
const { toast } = useToast();
const queryClient = useQueryClient();

const defaultScheduleName = useMemo(() => `Run ${agent.name}`, [agent.name]);
const [scheduleName, setScheduleName] = useState(defaultScheduleName);
const [cronExpression, setCronExpression] = useState(
agent.recommended_schedule_cron || "0 9 * * 1",
);

const createScheduleMutation = useCreateSchedule({
mutation: {
onSuccess: (response) => {
if (response.status === 200) {
toast({
title: "Schedule created",
});
callbacks?.onCreateSchedule?.(response.data);
// Invalidate schedules list for this graph
queryClient.invalidateQueries({
queryKey: getGetV1ListExecutionSchedulesForAGraphQueryKey(
agent.graph_id,
),
});
// Reset form
setScheduleName(defaultScheduleName);
setCronExpression(agent.recommended_schedule_cron || "0 9 * * 1");
callbacks?.onClose?.();
}
},
onError: (error: any) => {
toast({
title: "❌ Failed to create schedule",
description: error.message || "An unexpected error occurred.",
variant: "destructive",
});
},
},
});

const handleSchedule = useCallback(
(scheduleName: string, cronExpression: string) => {
if (!scheduleName.trim()) {
toast({
title: "⚠️ Schedule name required",
description: "Please provide a name for your schedule.",
variant: "destructive",
});
return Promise.reject(new Error("Schedule name required"));
}

return new Promise<void>((resolve, reject) => {
createScheduleMutation.mutate(
{
graphId: agent.graph_id,
data: {
name: scheduleName,
cron: cronExpression,
inputs: inputValues,
graph_version: agent.graph_version,
credentials: inputCredentials,
},
},
{
onSuccess: () => resolve(),
onError: (error) => reject(error),
},
);
});
},
[
agent.graph_id,
agent.graph_version,
inputValues,
inputCredentials,
createScheduleMutation,
toast,
],
);

const handleSetScheduleName = useCallback((name: string) => {
setScheduleName(name);
}, []);

const handleSetCronExpression = useCallback((expression: string) => {
setCronExpression(expression);
}, []);

const resetForm = useCallback(() => {
setScheduleName(defaultScheduleName);
setCronExpression(agent.recommended_schedule_cron || "0 9 * * 1");
}, [defaultScheduleName, agent.recommended_schedule_cron]);

return {
// State
scheduleName,
cronExpression,

// Loading state
isCreatingSchedule: createScheduleMutation.isPending,

// Actions
handleSchedule,
handleSetScheduleName,
handleSetCronExpression,
resetForm,
};
}
@@ -10,28 +10,38 @@ import {
TabsLineList,
TabsLineTrigger,
} from "@/components/molecules/TabsLine/TabsLine";
import { useSelectedScheduleView } from "./useSelectedScheduleView";
import { useScheduleDetails } from "./useScheduleDetails";
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/molecules/DropdownMenu/DropdownMenu";
import { PencilSimpleIcon, ArrowSquareOut } from "@phosphor-icons/react";
import Link from "next/link";
import { useScheduleDetailHeader } from "../RunDetailHeader/useScheduleDetailHeader";
import { DeleteScheduleButton } from "./components/DeleteScheduleButton/DeleteScheduleButton";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth";
import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils";
import { Skeleton } from "@/components/ui/skeleton";
import { AgentInputsReadOnly } from "../AgentInputsReadOnly/AgentInputsReadOnly";
import { ScheduleActions } from "./components/ScheduleActions";
import { Button } from "@/components/atoms/Button/Button";

interface Props {
interface ScheduleDetailsProps {
agent: LibraryAgent;
scheduleId: string;
onClearSelectedRun?: () => void;
}

export function SelectedScheduleView({
export function ScheduleDetails({
agent,
scheduleId,
onClearSelectedRun,
}: Props) {
const { schedule, isLoading, error } = useSelectedScheduleView(
}: ScheduleDetailsProps) {
const { schedule, isLoading, error } = useScheduleDetails(
agent.graph_id,
scheduleId,
);
@@ -88,17 +98,20 @@ export function SelectedScheduleView({
run={undefined}
scheduleRecurrence={
schedule
? `${humanizeCronExpression(schedule.cron || "")} · ${getTimezoneDisplayName(schedule.timezone || userTzRes || "UTC")}`
? `${humanizeCronExpression(schedule.cron || "", userTzRes)} · ${getTimezoneDisplayName(userTzRes || "UTC")}`
: undefined
}
/>
</div>
{schedule ? (
<ScheduleActions
agent={agent}
scheduleId={schedule.id}
onDeleted={onClearSelectedRun}
/>
<div className="flex items-center gap-2">
<DeleteScheduleButton
agent={agent}
scheduleId={schedule.id}
onDeleted={onClearSelectedRun}
/>
<ScheduleActions agent={agent} scheduleId={schedule.id} />
</div>
) : null}
</div>
</div>
@@ -119,7 +132,6 @@ export function SelectedScheduleView({
<AgentInputsReadOnly
agent={agent}
inputs={schedule?.input_data}
credentialInputs={schedule?.input_credentials}
/>
</div>
</RunDetailCard>
@@ -149,12 +161,10 @@ export function SelectedScheduleView({
Recurrence
</Text>
<p className="text-sm text-zinc-600">
{humanizeCronExpression(schedule.cron)}
{humanizeCronExpression(schedule.cron, userTzRes)}
{" • "}
<span className="text-xs text-zinc-600">
{getTimezoneDisplayName(
schedule.timezone || userTzRes || "UTC",
)}
{getTimezoneDisplayName(userTzRes || "UTC")}
</span>
</p>
</div>
@@ -177,9 +187,7 @@ export function SelectedScheduleView({
)}{" "}
•{" "}
<span className="text-xs text-zinc-600">
{getTimezoneDisplayName(
schedule.timezone || userTzRes || "UTC",
)}
{getTimezoneDisplayName(userTzRes || "UTC")}
</span>
</p>
</div>
@@ -191,3 +199,50 @@ export function SelectedScheduleView({
</div>
);
}

function ScheduleActions({
agent,
scheduleId,
}: {
agent: LibraryAgent;
scheduleId: string;
}) {
const { openInBuilderHref } = useScheduleDetailHeader(
agent.graph_id,
scheduleId,
agent.graph_version,
);
return (
<div className="flex items-center gap-2">
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="secondary" size="small">
•••
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
{openInBuilderHref ? (
<DropdownMenuItem asChild>
<Link
href={openInBuilderHref}
target="_blank"
className="flex items-center gap-2"
>
<ArrowSquareOut size={14} /> Open in builder
</Link>
</DropdownMenuItem>
) : null}
<DropdownMenuItem asChild>
<Link
href={`/build?flowID=${agent.graph_id}&flowVersion=${agent.graph_version}`}
target="_blank"
className="flex items-center gap-2"
>
<PencilSimpleIcon size={16} /> Edit agent
</Link>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
);
}
Some files were not shown because too many files have changed in this diff.