Merge branch 'dev' into seer/xml-parsing-error-handling
94 changes: .github/copilot-instructions.md (vendored)
@@ -12,6 +12,7 @@ This file provides comprehensive onboarding information for GitHub Copilot codin
- **Infrastructure** - Docker configurations, CI/CD, and development tools

**Primary Languages & Frameworks:**

- **Backend**: Python 3.10-3.13, FastAPI, Prisma ORM, PostgreSQL, RabbitMQ
- **Frontend**: TypeScript, Next.js 15, React, Tailwind CSS, Radix UI
- **Development**: Docker, Poetry, pnpm, Playwright, Storybook
@@ -23,15 +24,17 @@ This file provides comprehensive onboarding information for GitHub Copilot codin
**Always run these commands in the correct directory and in this order:**

1. **Initial Setup** (required once):

```bash
# Clone and enter repository
git clone <repo> && cd AutoGPT

# Start all services (database, redis, rabbitmq, clamav)
cd autogpt_platform && docker compose --profile local up deps --build --detach
```

2. **Backend Setup** (always run before backend development):

```bash
cd autogpt_platform/backend
poetry install # Install dependencies
@@ -48,6 +51,7 @@ This file provides comprehensive onboarding information for GitHub Copilot codin
### Runtime Requirements

**Critical:** Always ensure Docker services are running before starting development:

```bash
cd autogpt_platform && docker compose --profile local up deps --build --detach
```
@@ -58,6 +62,7 @@ cd autogpt_platform && docker compose --profile local up deps --build --detach
### Development Commands

**Backend Development:**

```bash
cd autogpt_platform/backend
poetry run serve # Start development server (port 8000)
@@ -68,6 +73,7 @@ poetry run lint # Lint code (ruff) - run after format
```

**Frontend Development:**

```bash
cd autogpt_platform/frontend
pnpm dev # Start development server (port 3000) - use for active development
@@ -81,23 +87,27 @@ pnpm storybook # Start component development server
### Testing Strategy

**Backend Tests:**

- **Block Tests**: `poetry run pytest backend/blocks/test/test_block.py -xvs` (validates all blocks)
- **Specific Block**: `poetry run pytest 'backend/blocks/test/test_block.py::test_available_blocks[BlockName]' -xvs`
- **Snapshot Tests**: Use `--snapshot-update` when output changes, always review with `git diff`

**Frontend Tests:**

- **E2E Tests**: Always run `pnpm dev` before `pnpm test` (Playwright requires running instance)
- **Component Tests**: Use Storybook for isolated component development

### Critical Validation Steps

**Before committing changes:**

1. Run `poetry run format` (backend) and `pnpm format` (frontend)
2. Ensure all tests pass in modified areas
3. Verify Docker services are still running
4. Check that database migrations apply cleanly

**Common Issues & Workarounds:**

- **Prisma issues**: Run `poetry run prisma generate` after schema changes
- **Permission errors**: Ensure Docker has proper permissions
- **Port conflicts**: Check the `docker-compose.yml` file for the current list of exposed ports. You can list all mapped ports with:
@@ -108,6 +118,7 @@ pnpm storybook # Start component development server
### Core Architecture

**AutoGPT Platform** (`autogpt_platform/`):

- `backend/` - FastAPI server with async support
- `backend/backend/` - Core API logic
- `backend/blocks/` - Agent execution blocks
@@ -121,6 +132,7 @@ pnpm storybook # Start component development server
- `docker-compose.yml` - Development stack orchestration

**Key Configuration Files:**

- `pyproject.toml` - Python dependencies and tooling
- `package.json` - Node.js dependencies and scripts
- `schema.prisma` - Database schema and migrations
@@ -136,6 +148,7 @@ pnpm storybook # Start component development server
### Development Workflow

**GitHub Actions**: Multiple CI/CD workflows in `.github/workflows/`

- `platform-backend-ci.yml` - Backend testing and validation
- `platform-frontend-ci.yml` - Frontend testing and validation
- `platform-fullstack-ci.yml` - End-to-end integration tests
@@ -146,11 +159,13 @@ pnpm storybook # Start component development server
### Key Source Files

**Backend Entry Points:**

- `backend/backend/server/server.py` - FastAPI application setup
- `backend/backend/data/` - Database models and user management
- `backend/blocks/` - Agent execution blocks and logic

**Frontend Entry Points:**

- `frontend/src/app/layout.tsx` - Root application layout
- `frontend/src/app/page.tsx` - Home page
- `frontend/src/lib/supabase/` - Authentication and database client
@@ -160,6 +175,7 @@ pnpm storybook # Start component development server
### Agent Block System

Agents are built using a visual block-based system where each block performs a single action. Blocks are defined in `backend/blocks/` and must include (see the sketch after this list):

- Block definition with input/output schemas
- Execution logic with proper error handling
- Tests validating functionality
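For orientation, here is a minimal hypothetical sketch of that shape. The exact `Block`/`BlockSchema` signatures, the `SchemaField` helper, and the constructor arguments are assumptions; check existing blocks in `backend/blocks/` for the real API before copying this.

```python
# Hypothetical block sketch - verify the base-class API against real blocks
# in backend/blocks/ before reusing this shape.
from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class ReverseTextBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="Text to reverse")

    class Output(BlockSchema):
        reversed_text: str = SchemaField(description="The input text, reversed")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            input_schema=ReverseTextBlock.Input,
            output_schema=ReverseTextBlock.Output,
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Blocks yield (output_name, value) pairs; errors raised here are
        # surfaced by the executor.
        yield "reversed_text", input_data.text[::-1]
```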
@@ -167,6 +183,7 @@ Agents are built using a visual block-based system where each block performs a s

### Database & ORM

**Prisma ORM** with PostgreSQL backend including pgvector for embeddings:

- Schema in `schema.prisma`
- Migrations in `backend/migrations/`
- Always run `prisma migrate dev` and `prisma generate` after schema changes
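After `prisma generate`, the generated Python client can be exercised roughly as below; treat this as an illustrative sketch, since the query shown is not taken from the codebase.

```python
# Illustrative prisma-client-py usage; the query is hypothetical.
import asyncio

from prisma import Prisma


async def main() -> None:
    db = Prisma()
    await db.connect()
    users = await db.user.find_many(take=5)  # assumes a "User" model in schema.prisma
    print(users)
    await db.disconnect()


asyncio.run(main())
```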
@@ -174,13 +191,15 @@ Agents are built using a visual block-based system where each block performs a s
## Environment Configuration

### Configuration Files Priority Order

1. **Backend**: `/backend/.env.default` → `/backend/.env` (user overrides)
2. **Frontend**: `/frontend/.env.default` → `/frontend/.env` (user overrides)
3. **Platform**: `/.env.default` (Supabase/shared) → `/.env` (user overrides)
4. Docker Compose `environment:` sections override file-based config
5. Shell environment variables have highest precedence
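A sketch of how this layering can be emulated with `python-dotenv` (illustrative; the platform's actual loader is not shown here). Because `load_dotenv` does not overwrite values that are already set, loading the more specific file first reproduces the precedence above:

```python
# Illustrative only: shell env > .env > .env.default, per the list above.
import os

from dotenv import load_dotenv

load_dotenv(".env")          # user overrides; real shell variables still win
load_dotenv(".env.default")  # fills in anything still unset

print(os.getenv("DATABASE_URL"))  # hypothetical variable name
```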
### Docker Environment Setup

- All services use hardcoded defaults (no `${VARIABLE}` substitutions)
- The `env_file` directive loads variables INTO containers at runtime
- Backend/Frontend services use YAML anchors for consistent configuration
@@ -189,6 +208,7 @@ Agents are built using a visual block-based system where each block performs a s
## Advanced Development Patterns

### Adding New Blocks

1. Create file in `/backend/backend/blocks/`
2. Inherit from `Block` base class with input/output schemas
3. Implement `run` method with proper error handling
@@ -198,6 +218,7 @@ Agents are built using a visual block-based system where each block performs a s
7. Consider how inputs/outputs connect with other blocks in graph editor
### API Development

1. Update routes in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside route files
@@ -205,21 +226,76 @@ Agents are built using a visual block-based system where each block performs a s
5. Run `poetry run test` to verify changes
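For shape only, a hedged sketch of the router-plus-model pattern these steps describe; the `widgets` resource and model names are invented for illustration, not taken from the real routers:

```python
# Hypothetical route sketch; real routers live in /backend/backend/server/routers/.
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter(prefix="/api/widgets", tags=["widgets"])


class WidgetResponse(BaseModel):
    id: str
    name: str


@router.get("/{widget_id}", response_model=WidgetResponse)
async def get_widget(widget_id: str) -> WidgetResponse:
    # A real route would call into the data layer; hardcoded for the sketch.
    return WidgetResponse(id=widget_id, name="example")
```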
### Frontend Development

1. Components in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for component development
4. Test user-facing features with Playwright E2E tests
5. Update protected routes in middleware when needed

**📖 Complete Frontend Guide**: See `autogpt_platform/frontend/CONTRIBUTING.md` and `autogpt_platform/frontend/.cursorrules` for comprehensive patterns and conventions.

**Quick Reference:**

**Component Structure:**

- Separate render logic from data/behavior
- Structure: `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
- Exception: Small components (3-4 lines of logic) can be inline
- Render-only components can be direct files without folders

**Data Fetching:**

- Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Generated via Orval from backend OpenAPI spec
- Pattern: `use{Method}{Version}{OperationName}`
- Example: `useGetV2ListLibraryAgents`
- Regenerate with: `pnpm generate:api`
- **Never** use deprecated `BackendAPI` or `src/lib/autogpt-server-api/*`
**Code Conventions:**
- Use function declarations for components and handlers (not arrow functions)
- Only arrow functions for small inline lambdas (map, filter, etc.)
- Components: `PascalCase`, Hooks: `camelCase` with `use` prefix
- No barrel files or `index.ts` re-exports
- Minimal comments (code should be self-documenting)

**Styling:**

- Use Tailwind CSS utilities only
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Never use `src/components/__legacy__/*`
- Only use Phosphor Icons (`@phosphor-icons/react`)
- Prefer design tokens over hardcoded values

**Error Handling:**

- Render errors: Use `<ErrorCard />` component
- Mutation errors: Display with toast notifications
- Manual exceptions: Use `Sentry.captureException()`
- Global error boundaries already configured

**Testing:**

- Add/update Storybook stories for UI components (`pnpm storybook`)
- Run Playwright E2E tests with `pnpm test`
- Verify in Chromatic after PR

**Architecture:**

- Default to client components ("use client")
- Server components only for SEO or extreme TTFB needs
- Use React Query for server state (via generated hooks)
- Co-locate UI state in components/hooks

### Security Guidelines

**Cache Protection Middleware** (`/backend/backend/server/middleware/security.py`):

- Default: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses allow list approach for cacheable paths (static assets, health checks, public pages)
- Prevents sensitive data caching in browsers/proxies
- Add new cacheable endpoints to `CACHEABLE_PATHS`
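A hedged sketch of the default-deny caching pattern described above; only the `CACHEABLE_PATHS` name and the header value come from the bullets, everything else is illustrative rather than the actual middleware:

```python
# Illustrative only - see /backend/backend/server/middleware/security.py
# for the real implementation.
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

CACHEABLE_PATHS = {"/health", "/static"}  # example allow-list entries


class CacheProtectionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        # Default-deny: anything not on the allow list is marked uncacheable.
        if not any(request.url.path.startswith(p) for p in CACHEABLE_PATHS):
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```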
### CI/CD Alignment

The repository has comprehensive CI workflows that test:

- **Backend**: Python 3.11-3.13, services (Redis/RabbitMQ/ClamAV), Prisma migrations, Poetry lock validation
- **Frontend**: Node.js 21, pnpm, Playwright with Docker Compose stack, API schema validation
- **Integration**: Full-stack type checking and E2E testing
@@ -229,6 +305,7 @@ Match these patterns when developing locally - the copilot setup environment mir

## Collaboration with Other AI Assistants

This repository is actively developed with assistance from Claude (via CLAUDE.md files). When working on this codebase:

- Check for existing CLAUDE.md files that provide additional context
- Follow established patterns and conventions already in the codebase
- Maintain consistency with existing code style and architecture
@@ -237,8 +314,9 @@ This repository is actively developed with assistance from Claude (via CLAUDE.md

## Trust These Instructions

These instructions are comprehensive and tested. Only perform additional searches if:

1. Information here is incomplete for your specific task
2. You encounter errors not covered by the workarounds
3. You need to understand implementation details not covered above

For detailed platform development patterns, refer to `autogpt_platform/CLAUDE.md` and `AGENTS.md` in the repository root.
2 changes: .github/workflows/claude-dependabot.yml (vendored)
@@ -80,7 +80,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22"

      - name: Enable corepack
        run: corepack enable
8 changes: .github/workflows/claude.yml (vendored)
@@ -44,6 +44,12 @@ jobs:
        with:
          fetch-depth: 1

+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@v1.3.1
+        with:
+          large-packages: false # slow
+          docker-images: false # limited benefit

      # Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
      - name: Set up Python
        uses: actions/setup-python@v5
@@ -90,7 +96,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22"

      - name: Enable corepack
        run: corepack enable
4 changes: .github/workflows/copilot-setup-steps.yml (vendored)
@@ -78,7 +78,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22"

      - name: Enable corepack
        run: corepack enable
@@ -299,4 +299,4 @@ jobs:
          echo "✅ AutoGPT Platform development environment setup complete!"
          echo "🚀 Ready for development with Docker services running"
          echo "📝 Backend server: poetry run serve (port 8000)"
          echo "🌐 Frontend server: pnpm dev (port 3000)"
12 changes: .github/workflows/platform-frontend-ci.yml (vendored)
@@ -12,6 +12,10 @@ on:
      - "autogpt_platform/frontend/**"
  merge_group:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

defaults:
  run:
    shell: bash
@@ -30,7 +34,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable
@@ -62,7 +66,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable
@@ -97,7 +101,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable
@@ -138,7 +142,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable
8 changes: .github/workflows/platform-fullstack-ci.yml (vendored)
@@ -12,6 +12,10 @@ on:
      - "autogpt_platform/**"
  merge_group:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

defaults:
  run:
    shell: bash
@@ -30,7 +34,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable
@@ -66,7 +70,7 @@ jobs:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
-          node-version: "21"
+          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable
@@ -11,7 +11,7 @@ jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/stale@v9
+      - uses: actions/stale@v10
        with:
          # operations-per-run: 5000
          stale-issue-message: >
2 changes: .github/workflows/repo-pr-label.yml (vendored)
@@ -61,6 +61,6 @@ jobs:
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/labeler@v5
+      - uses: actions/labeler@v6
        with:
          sync-labels: true
1 change: .gitignore (vendored)
@@ -178,3 +178,4 @@ autogpt_platform/backend/settings.py
*.ign.*
.test-contents
.claude/settings.local.json
+/autogpt_platform/backend/logs
@@ -63,6 +63,9 @@ poetry run pytest path/to/test.py --snapshot-update
# Install dependencies
cd frontend && pnpm i

+# Generate API client from OpenAPI spec
+pnpm generate:api

# Start development server
pnpm dev

@@ -75,12 +78,23 @@ pnpm storybook

# Build production
pnpm build

# Format and lint
pnpm format

# Type checking
pnpm types
```
We have a components library in autogpt_platform/frontend/src/components/atoms that should be used when adding new pages and components.
**📖 Complete Guide**: See `/frontend/CONTRIBUTING.md` and `/frontend/.cursorrules` for comprehensive frontend patterns.

**Key Frontend Conventions:**

- Separate render logic from data/behavior in components
- Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Use function declarations (not arrow functions) for components/handlers
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Only use Phosphor Icons
- Never use `src/components/__legacy__/*` or deprecated `BackendAPI`

## Architecture Overview

@@ -95,11 +109,16 @@ We have a components library in autogpt_platform/frontend/src/components/atoms t

### Frontend Architecture

-- **Framework**: Next.js App Router with React Server Components
-- **State Management**: React hooks + Supabase client for real-time updates
+- **Framework**: Next.js 15 App Router (client-first approach)
+- **Data Fetching**: Type-safe generated API hooks via Orval + React Query
+- **State Management**: React Query for server state, co-located UI state in components/hooks
+- **Component Structure**: Separate render logic (`.tsx`) from business logic (`use*.ts` hooks)
- **Workflow Builder**: Visual graph editor using @xyflow/react
-- **UI Components**: Radix UI primitives with Tailwind CSS styling
+- **UI Components**: shadcn/ui (Radix UI primitives) with Tailwind CSS styling
- **Icons**: Phosphor Icons only
- **Feature Flags**: LaunchDarkly integration
- **Error Handling**: ErrorCard for render errors, toast for mutations, Sentry for exceptions
- **Testing**: Playwright for E2E, Storybook for component development
### Key Concepts
@@ -153,6 +172,7 @@ Key models (defined in `/backend/schema.prisma`):

**Adding a new block:**

Follow the comprehensive [Block SDK Guide](../../../docs/content/platform/block-sdk-guide.md) which covers:

- Provider configuration with `ProviderBuilder`
- Block schema definition
- Authentication (API keys, OAuth, webhooks)
@@ -160,6 +180,7 @@ Follow the comprehensive [Block SDK Guide](../../../docs/content/platform/block-
- File organization

Quick steps:

1. Create new file in `/backend/backend/blocks/`
2. Configure provider using `ProviderBuilder` in `_config.py`
3. Inherit from `Block` base class
@@ -171,6 +192,8 @@ Quick steps:

Note: when creating many new blocks, analyze each block's interface and consider whether the blocks would work well together in the graph-based editor or would struggle to connect productively (e.g., do the inputs and outputs tie together well?).

If you get pushback or hit complex block conditions, check the new_blocks guide in the docs.

**Modifying the API:**

1. Update route in `/backend/backend/server/routers/`
@@ -180,10 +203,20 @@ ex: do the inputs and outputs tie well together?
**Frontend feature development:**

-1. Components go in `/frontend/src/components/`
-2. Use existing UI components from `/frontend/src/components/ui/`
-3. Add Storybook stories for new components
-4. Test with Playwright if user-facing
+See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference:
+
+1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx`
+   - Add `usePageName.ts` hook for logic
+   - Put sub-components in local `components/` folder
+2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
+   - Use design system components from `src/components/` (atoms, molecules, organisms)
+   - Never use `src/components/__legacy__/*`
+3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/`
+   - Regenerate with `pnpm generate:api`
+   - Pattern: `use{Method}{Version}{OperationName}`
+4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only
+5. **Testing**: Add Storybook stories for new components, Playwright for E2E
+6. **Code conventions**: Function declarations (not arrow functions) for components/handlers

### Security Implementation
@@ -1,4 +1,4 @@
-.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend
+.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend load-store-agents

# Run just Supabase + Redis + RabbitMQ
start-core:
@@ -8,6 +8,11 @@ start-core:
stop-core:
	docker compose stop deps

+reset-db:
+	rm -rf db/docker/volumes/db/data
+	cd backend && poetry run prisma migrate deploy
+	cd backend && poetry run prisma generate

# View logs for core services
logs-core:
	docker compose logs -f deps
@@ -35,13 +40,22 @@ run-backend:

run-frontend:
	cd frontend && pnpm dev

+test-data:
+	cd backend && poetry run python test/test_data_creator.py
+
+load-store-agents:
+	cd backend && poetry run load-store-agents

help:
	@echo "Usage: make <target>"
	@echo "Targets:"
	@echo " start-core - Start just the core services (Supabase, Redis, RabbitMQ) in background"
	@echo " stop-core - Stop the core services"
+	@echo " reset-db - Reset the database by deleting the volume"
	@echo " logs-core - Tail the logs for core services"
	@echo " format - Format & lint backend (Python) and frontend (TypeScript) code"
	@echo " migrate - Run backend database migrations"
	@echo " run-backend - Run the backend FastAPI server"
	@echo " run-frontend - Run the frontend Next.js development server"
+	@echo " test-data - Run the test data creator"
+	@echo " load-store-agents - Load store agents from agents/ folder into test database"
@@ -57,6 +57,9 @@ class APIKeySmith:
    def hash_key(self, raw_key: str) -> tuple[str, str]:
        """Hash a raw API key, returning the hash and its hex-encoded salt."""
+        if not raw_key.startswith(self.PREFIX):
+            raise ValueError("Key without 'agpt_' prefix would fail validation")
+
        salt = self._generate_salt()
        hash = self._hash_key_with_salt(raw_key, salt)
        return hash, salt.hex()
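# Hypothetical usage sketch (illustrative, not from the codebase): persist
# both values, then verify later by re-hashing the presented key with the
# stored salt and comparing against the stored hash.
#
#     smith = APIKeySmith()
#     hashed, salt_hex = smith.hash_key("agpt_example_raw_key")
#     save_to_db(hash=hashed, salt=salt_hex)  # hypothetical persistence helper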
@@ -1,5 +1,10 @@
from .config import verify_settings
-from .dependencies import get_user_id, requires_admin_user, requires_user
+from .dependencies import (
+    get_optional_user_id,
+    get_user_id,
+    requires_admin_user,
+    requires_user,
+)
from .helpers import add_auth_responses_to_openapi
from .models import User
@@ -8,6 +13,7 @@ __all__ = [
    "get_user_id",
    "requires_admin_user",
    "requires_user",
+    "get_optional_user_id",
    "add_auth_responses_to_openapi",
    "User",
]
@@ -4,11 +4,53 @@ FastAPI dependency functions for JWT-based authentication and authorization.
These are the high-level dependency functions used in route definitions.
"""

+import logging

import fastapi
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer

from .jwt_utils import get_jwt_payload, verify_user
from .models import User

+optional_bearer = HTTPBearer(auto_error=False)
+
+# Header name for admin impersonation
+IMPERSONATION_HEADER_NAME = "X-Act-As-User-Id"
+
+logger = logging.getLogger(__name__)
+
+
+def get_optional_user_id(
+    credentials: HTTPAuthorizationCredentials | None = fastapi.Security(
+        optional_bearer
+    ),
+) -> str | None:
+    """
+    Attempts to extract the user ID ("sub" claim) from a Bearer JWT if provided.
+
+    This dependency allows for both authenticated and anonymous access. If a valid bearer token is
+    supplied, it parses the JWT and extracts the user ID. If the token is missing or invalid, it returns None,
+    treating the request as anonymous.
+
+    Args:
+        credentials: Optional HTTPAuthorizationCredentials object from FastAPI Security dependency.
+
+    Returns:
+        The user ID (str) extracted from the JWT "sub" claim, or None if no valid token is present.
+    """
+    if not credentials:
+        return None
+
+    try:
+        # Parse JWT token to get user ID
+        from autogpt_libs.auth.jwt_utils import parse_jwt_token
+
+        payload = parse_jwt_token(credentials.credentials)
+        return payload.get("sub")
+    except Exception as e:
+        logger.debug(f"Auth token validation failed (anonymous access): {e}")
+        return None
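# Hypothetical route sketch using get_optional_user_id (illustrative, not
# from the codebase):
#
#     @app.get("/feed")
#     async def feed(
#         user_id: str | None = fastapi.Depends(get_optional_user_id),
#     ):
#         return {"personalized": user_id is not None}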

async def requires_user(jwt_payload: dict = fastapi.Security(get_jwt_payload)) -> User:
    """
@@ -32,16 +74,44 @@ async def requires_admin_user(
    return verify_user(jwt_payload, admin_only=True)


-async def get_user_id(jwt_payload: dict = fastapi.Security(get_jwt_payload)) -> str:
+async def get_user_id(
+    request: fastapi.Request, jwt_payload: dict = fastapi.Security(get_jwt_payload)
+) -> str:
    """
    FastAPI dependency that returns the ID of the authenticated user.
+
+    Supports admin impersonation via X-Act-As-User-Id header:
+    - If the header is present and user is admin, returns the impersonated user ID
+    - Otherwise returns the authenticated user's own ID
+    - Logs all impersonation actions for audit trail

    Raises:
        HTTPException: 401 for authentication failures or missing user ID
+        HTTPException: 403 if non-admin tries to use impersonation
    """
    # Get the authenticated user's ID from JWT
    user_id = jwt_payload.get("sub")
    if not user_id:
        raise fastapi.HTTPException(
            status_code=401, detail="User ID not found in token"
        )

+    # Check for admin impersonation header
+    impersonate_header = request.headers.get(IMPERSONATION_HEADER_NAME, "").strip()
+    if impersonate_header:
+        # Verify the authenticated user is an admin
+        authenticated_user = verify_user(jwt_payload, admin_only=False)
+        if authenticated_user.role != "admin":
+            raise fastapi.HTTPException(
+                status_code=403, detail="Only admin users can impersonate other users"
+            )
+
+        # Log the impersonation for audit trail
+        logger.info(
+            f"Admin impersonation: {authenticated_user.user_id} ({authenticated_user.email}) "
+            f"acting as user {impersonate_header} for requesting {request.method} {request.url}"
+        )
+
+        return impersonate_header
+
    return user_id
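# Hypothetical client-side call exercising the impersonation path
# (illustrative, not from the codebase; requires a JWT with role "admin"):
#
#     import httpx
#
#     httpx.get(
#         "http://localhost:8000/api/example",
#         headers={
#             "Authorization": f"Bearer {admin_jwt}",
#             "X-Act-As-User-Id": "target-user-123",
#         },
#     )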
@@ -4,9 +4,10 @@ Tests the full authentication flow from HTTP requests to user validation.
"""

import os
+from unittest.mock import Mock

import pytest
-from fastapi import FastAPI, HTTPException, Security
+from fastapi import FastAPI, HTTPException, Request, Security
from fastapi.testclient import TestClient
from pytest_mock import MockerFixture

@@ -45,6 +46,7 @@ class TestAuthDependencies:
        """Create a test client."""
        return TestClient(app)

+    @pytest.mark.asyncio
    async def test_requires_user_with_valid_jwt_payload(self, mocker: MockerFixture):
        """Test requires_user with valid JWT payload."""
        jwt_payload = {"sub": "user-123", "role": "user", "email": "user@example.com"}
@@ -58,6 +60,7 @@ class TestAuthDependencies:
        assert user.user_id == "user-123"
        assert user.role == "user"

+    @pytest.mark.asyncio
    async def test_requires_user_with_admin_jwt_payload(self, mocker: MockerFixture):
        """Test requires_user accepts admin users."""
        jwt_payload = {
@@ -73,6 +76,7 @@ class TestAuthDependencies:
        assert user.user_id == "admin-456"
        assert user.role == "admin"

+    @pytest.mark.asyncio
    async def test_requires_user_missing_sub(self):
        """Test requires_user with missing user ID."""
        jwt_payload = {"role": "user", "email": "user@example.com"}
@@ -82,6 +86,7 @@ class TestAuthDependencies:
        assert exc_info.value.status_code == 401
        assert "User ID not found" in exc_info.value.detail

+    @pytest.mark.asyncio
    async def test_requires_user_empty_sub(self):
        """Test requires_user with empty user ID."""
        jwt_payload = {"sub": "", "role": "user"}
@@ -90,6 +95,7 @@ class TestAuthDependencies:
            await requires_user(jwt_payload)
        assert exc_info.value.status_code == 401

+    @pytest.mark.asyncio
    async def test_requires_admin_user_with_admin(self, mocker: MockerFixture):
        """Test requires_admin_user with admin role."""
        jwt_payload = {
@@ -105,6 +111,7 @@ class TestAuthDependencies:
        assert user.user_id == "admin-789"
        assert user.role == "admin"

+    @pytest.mark.asyncio
    async def test_requires_admin_user_with_regular_user(self):
        """Test requires_admin_user rejects regular users."""
        jwt_payload = {"sub": "user-123", "role": "user", "email": "user@example.com"}
@@ -114,6 +121,7 @@ class TestAuthDependencies:
        assert exc_info.value.status_code == 403
        assert "Admin access required" in exc_info.value.detail

+    @pytest.mark.asyncio
    async def test_requires_admin_user_missing_role(self):
        """Test requires_admin_user with missing role."""
        jwt_payload = {"sub": "user-123", "email": "user@example.com"}
@@ -121,31 +129,40 @@ class TestAuthDependencies:
        with pytest.raises(KeyError):
            await requires_admin_user(jwt_payload)

+    @pytest.mark.asyncio
    async def test_get_user_id_with_valid_payload(self, mocker: MockerFixture):
        """Test get_user_id extracts user ID correctly."""
+        request = Mock(spec=Request)
+        request.headers = {}
        jwt_payload = {"sub": "user-id-xyz", "role": "user"}

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )
-        user_id = await get_user_id(jwt_payload)
+        user_id = await get_user_id(request, jwt_payload)
        assert user_id == "user-id-xyz"

+    @pytest.mark.asyncio
    async def test_get_user_id_missing_sub(self):
        """Test get_user_id with missing user ID."""
+        request = Mock(spec=Request)
+        request.headers = {}
        jwt_payload = {"role": "user"}

        with pytest.raises(HTTPException) as exc_info:
-            await get_user_id(jwt_payload)
+            await get_user_id(request, jwt_payload)
        assert exc_info.value.status_code == 401
        assert "User ID not found" in exc_info.value.detail

+    @pytest.mark.asyncio
    async def test_get_user_id_none_sub(self):
        """Test get_user_id with None user ID."""
+        request = Mock(spec=Request)
+        request.headers = {}
        jwt_payload = {"sub": None, "role": "user"}

        with pytest.raises(HTTPException) as exc_info:
-            await get_user_id(jwt_payload)
+            await get_user_id(request, jwt_payload)
        assert exc_info.value.status_code == 401

@@ -170,6 +187,7 @@ class TestAuthDependenciesIntegration:

        return _create_token

+    @pytest.mark.asyncio
    async def test_endpoint_auth_enabled_no_token(self):
        """Test endpoints require token when auth is enabled."""
        app = FastAPI()
@@ -184,6 +202,7 @@ class TestAuthDependenciesIntegration:
        response = client.get("/test")
        assert response.status_code == 401

+    @pytest.mark.asyncio
    async def test_endpoint_with_valid_token(self, create_token):
        """Test endpoint with valid JWT token."""
        app = FastAPI()
@@ -203,6 +222,7 @@ class TestAuthDependenciesIntegration:
        assert response.status_code == 200
        assert response.json()["user_id"] == "test-user"

+    @pytest.mark.asyncio
    async def test_admin_endpoint_requires_admin_role(self, create_token):
        """Test admin endpoint rejects non-admin users."""
        app = FastAPI()
@@ -240,6 +260,7 @@ class TestAuthDependenciesIntegration:
class TestAuthDependenciesEdgeCases:
    """Edge case tests for authentication dependencies."""

+    @pytest.mark.asyncio
    async def test_dependency_with_complex_payload(self):
        """Test dependencies handle complex JWT payloads."""
        complex_payload = {
@@ -263,6 +284,7 @@ class TestAuthDependenciesEdgeCases:
        admin = await requires_admin_user(complex_payload)
        assert admin.role == "admin"

+    @pytest.mark.asyncio
    async def test_dependency_with_unicode_in_payload(self):
        """Test dependencies handle unicode in JWT payloads."""
        unicode_payload = {
@@ -276,6 +298,7 @@ class TestAuthDependenciesEdgeCases:
        assert "😀" in user.user_id
        assert user.email == "测试@example.com"

+    @pytest.mark.asyncio
    async def test_dependency_with_null_values(self):
        """Test dependencies handle null values in payload."""
        null_payload = {
@@ -290,6 +313,7 @@ class TestAuthDependenciesEdgeCases:
        assert user.user_id == "user-123"
        assert user.email is None

+    @pytest.mark.asyncio
    async def test_concurrent_requests_isolation(self):
        """Test that concurrent requests don't interfere with each other."""
        payload1 = {"sub": "user-1", "role": "user"}
@@ -314,6 +338,7 @@ class TestAuthDependenciesEdgeCases:
            ({"sub": "user", "role": "user"}, "Admin access required", True),
        ],
    )
+    @pytest.mark.asyncio
    async def test_dependency_error_cases(
        self, payload, expected_error: str, admin_only: bool
    ):
@@ -325,6 +350,7 @@ class TestAuthDependenciesEdgeCases:
            verify_user(payload, admin_only=admin_only)
        assert expected_error in exc_info.value.detail

+    @pytest.mark.asyncio
    async def test_dependency_valid_user(self):
        """Test valid user case for dependency."""
        # Import verify_user to test it directly since dependencies use FastAPI Security
@@ -333,3 +359,196 @@ class TestAuthDependenciesEdgeCases:
        # Valid case
        user = verify_user({"sub": "user", "role": "user"}, admin_only=False)
        assert user.user_id == "user"


class TestAdminImpersonation:
    """Test suite for admin user impersonation functionality."""

    @pytest.mark.asyncio
    async def test_admin_impersonation_success(self, mocker: MockerFixture):
        """Test admin successfully impersonating another user."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": "target-user-123"}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        # Mock verify_user to return admin user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="admin-456", email="admin@example.com", role="admin"
        )

        # Mock logger to verify audit logging
        mock_logger = mocker.patch("autogpt_libs.auth.dependencies.logger")

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should return the impersonated user ID
        assert user_id == "target-user-123"

        # Should log the impersonation attempt
        mock_logger.info.assert_called_once()
        log_call = mock_logger.info.call_args[0][0]
        assert "Admin impersonation:" in log_call
        assert "admin@example.com" in log_call
        assert "target-user-123" in log_call

    @pytest.mark.asyncio
    async def test_non_admin_impersonation_attempt(self, mocker: MockerFixture):
        """Test non-admin user attempting impersonation returns 403."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": "target-user-123"}
        jwt_payload = {
            "sub": "regular-user",
            "role": "user",
            "email": "user@example.com",
        }

        # Mock verify_user to return regular user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="regular-user", email="user@example.com", role="user"
        )

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        with pytest.raises(HTTPException) as exc_info:
            await get_user_id(request, jwt_payload)

        assert exc_info.value.status_code == 403
        assert "Only admin users can impersonate other users" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_impersonation_empty_header(self, mocker: MockerFixture):
        """Test impersonation with empty header falls back to regular user ID."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": ""}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should fall back to the admin's own user ID
        assert user_id == "admin-456"

    @pytest.mark.asyncio
    async def test_impersonation_missing_header(self, mocker: MockerFixture):
        """Test normal behavior when impersonation header is missing."""
        request = Mock(spec=Request)
        request.headers = {}  # No impersonation header
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should return the admin's own user ID
        assert user_id == "admin-456"

    @pytest.mark.asyncio
    async def test_impersonation_audit_logging_details(self, mocker: MockerFixture):
        """Test that impersonation audit logging includes all required details."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": "victim-user-789"}
        jwt_payload = {
            "sub": "admin-999",
            "role": "admin",
            "email": "superadmin@company.com",
        }

        # Mock verify_user to return admin user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="admin-999", email="superadmin@company.com", role="admin"
        )

        # Mock logger to capture audit trail
        mock_logger = mocker.patch("autogpt_libs.auth.dependencies.logger")

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Verify all audit details are logged
        assert user_id == "victim-user-789"
        mock_logger.info.assert_called_once()

        log_message = mock_logger.info.call_args[0][0]
        assert "Admin impersonation:" in log_message
        assert "superadmin@company.com" in log_message
        assert "victim-user-789" in log_message

    @pytest.mark.asyncio
    async def test_impersonation_header_case_sensitivity(self, mocker: MockerFixture):
        """Test that impersonation header is case-sensitive."""
        request = Mock(spec=Request)
        # Use wrong case - should not trigger impersonation
        request.headers = {"x-act-as-user-id": "target-user-123"}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should fall back to admin's own ID (header case mismatch)
        assert user_id == "admin-456"

    @pytest.mark.asyncio
    async def test_impersonation_with_whitespace_header(self, mocker: MockerFixture):
        """Test impersonation with whitespace in header value."""
        request = Mock(spec=Request)
        request.headers = {"X-Act-As-User-Id": " target-user-123 "}
        jwt_payload = {
            "sub": "admin-456",
            "role": "admin",
            "email": "admin@example.com",
        }

        # Mock verify_user to return admin user data
        mock_verify_user = mocker.patch("autogpt_libs.auth.dependencies.verify_user")
        mock_verify_user.return_value = Mock(
            user_id="admin-456", email="admin@example.com", role="admin"
        )

        # Mock logger
        mock_logger = mocker.patch("autogpt_libs.auth.dependencies.logger")

        mocker.patch(
            "autogpt_libs.auth.dependencies.get_jwt_payload", return_value=jwt_payload
        )

        user_id = await get_user_id(request, jwt_payload)

        # Should strip whitespace and impersonate successfully
        assert user_id == "target-user-123"
        mock_logger.info.assert_called_once()
@@ -1,29 +1,25 @@
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi

from .jwt_utils import bearer_jwt_auth


def add_auth_responses_to_openapi(app: FastAPI) -> None:
    """
-    Set up custom OpenAPI schema generation that adds 401 responses
+    Patch a FastAPI instance's `openapi()` method to add 401 responses
    to all authenticated endpoints.

    This is needed when using HTTPBearer with auto_error=False to get proper
    401 responses instead of 403, but FastAPI only automatically adds security
    responses when auto_error=True.
    """
+    # Wrap current method to allow stacking OpenAPI schema modifiers like this
+    wrapped_openapi = app.openapi

    def custom_openapi():
        if app.openapi_schema:
            return app.openapi_schema

-        openapi_schema = get_openapi(
-            title=app.title,
-            version=app.version,
-            description=app.description,
-            routes=app.routes,
-        )
+        openapi_schema = wrapped_openapi()

        # Add 401 response to all endpoints that have security requirements
        for path, methods in openapi_schema["paths"].items():
@@ -94,42 +94,36 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
    config = LoggingConfig()
    log_handlers: list[logging.Handler] = []

+    structured_logging = config.enable_cloud_logging or force_cloud_logging
+
    # Console output handlers
-    stdout = logging.StreamHandler(stream=sys.stdout)
-    stdout.setLevel(config.level)
-    stdout.addFilter(BelowLevelFilter(logging.WARNING))
-    if config.level == logging.DEBUG:
-        stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
-    else:
-        stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
+    if not structured_logging:
+        stdout = logging.StreamHandler(stream=sys.stdout)
+        stdout.setLevel(config.level)
+        stdout.addFilter(BelowLevelFilter(logging.WARNING))
+        if config.level == logging.DEBUG:
+            stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
+        else:
+            stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

-    stderr = logging.StreamHandler()
-    stderr.setLevel(logging.WARNING)
-    if config.level == logging.DEBUG:
-        stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
-    else:
-        stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
+        stderr = logging.StreamHandler()
+        stderr.setLevel(logging.WARNING)
+        if config.level == logging.DEBUG:
+            stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
+        else:
+            stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

-    log_handlers += [stdout, stderr]
+        log_handlers += [stdout, stderr]

-    # Cloud logging setup
-    if config.enable_cloud_logging or force_cloud_logging:
-        import google.cloud.logging
-        from google.cloud.logging.handlers import CloudLoggingHandler
-        from google.cloud.logging_v2.handlers.transports import (
-            BackgroundThreadTransport,
-        )
+    else:
+        # Use Google Cloud Structured Log Handler. Log entries are printed to stdout
+        # in a JSON format which is automatically picked up by Google Cloud Logging.
+        from google.cloud.logging.handlers import StructuredLogHandler

-        client = google.cloud.logging.Client()
-        # Use BackgroundThreadTransport to prevent blocking the main thread
-        # and deadlocks when gRPC calls to Google Cloud Logging hang
-        cloud_handler = CloudLoggingHandler(
-            client,
-            name="autogpt_logs",
-            transport=BackgroundThreadTransport,
-        )
-        cloud_handler.setLevel(config.level)
-        log_handlers.append(cloud_handler)
+        structured_log_handler = StructuredLogHandler(stream=sys.stdout)
+        structured_log_handler.setLevel(config.level)
+        log_handlers.append(structured_log_handler)

    # File logging setup
    if config.enable_file_logging:
@@ -185,7 +179,13 @@ def configure_logging(force_cloud_logging: bool = False) -> None:

    # Configure the root logger
    logging.basicConfig(
-        format=DEBUG_LOG_FORMAT if config.level == logging.DEBUG else SIMPLE_LOG_FORMAT,
+        format=(
+            "%(levelname)s %(message)s"
+            if structured_logging
+            else (
+                DEBUG_LOG_FORMAT if config.level == logging.DEBUG else SIMPLE_LOG_FORMAT
+            )
+        ),
        level=config.level,
        handlers=log_handlers,
    )
@@ -134,13 +134,6 @@ POSTMARK_WEBHOOK_TOKEN=
# Error Tracking
SENTRY_DSN=

-# Cloudflare Turnstile (CAPTCHA) Configuration
-# Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
-# This is the backend secret key
-TURNSTILE_SECRET_KEY=
-# This is the verify URL
-TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify
-
# Feature Flags
LAUNCH_DARKLY_SDK_KEY=
@@ -47,6 +47,7 @@ RUN poetry install --no-ansi --no-root
# Generate Prisma client
COPY autogpt_platform/backend/schema.prisma ./
+COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
RUN poetry run prisma generate

FROM debian:13-slim AS server_dependencies
@@ -92,6 +93,7 @@ FROM server_dependencies AS migrate

# Migration stage only needs schema and migrations - much lighter than full backend
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
+COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations

FROM server_dependencies AS server
@@ -108,7 +108,7 @@ import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot

-from backend.server.v2.myroute import router
+from backend.api.features.myroute import router

app = fastapi.FastAPI()
app.include_router(router)
@@ -149,7 +149,7 @@ These provide the easiest way to set up authentication mocking in test modules:
import fastapi
import fastapi.testclient
import pytest
-from backend.server.v2.myroute import router
+from backend.api.features.myroute import router

app = fastapi.FastAPI()
app.include_router(router)
242 changes: autogpt_platform/backend/agents/StoreAgent_rows.csv (new file)
@@ -0,0 +1,242 @@
listing_id,storeListingVersionId,slug,agent_name,agent_video,agent_image,featured,sub_heading,description,categories,useForOnboarding,is_available
6e60a900-9d7d-490e-9af2-a194827ed632,d85882b8-633f-44ce-a315-c20a8c123d19,flux-ai-image-generator,Flux AI Image Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ca154dd1-140e-454c-91bd-2d8a00de3f08.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/577d995d-bc38-40a9-a23f-1f30f5774bdb.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/415db1b7-115c-43ab-bd6c-4e9f7ef95be1.jpg""]",false,Transform ideas into breathtaking images,"Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!","[""creative""]",false,true
f11fc6e9-6166-4676-ac5d-f07127b270c1,c775f60d-b99f-418b-8fe0-53172258c3ce,youtube-transcription-scraper,YouTube Transcription Scraper,https://youtu.be/H8S3pU68lGE,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/65bce54b-0124-4b0d-9e3e-f9b89d0dc99e.jpg""]",false,Fetch the transcriptions from the most popular YouTube videos in your chosen topic,"Effortlessly gather transcriptions from multiple YouTube videos with this agent. It scrapes and compiles video transcripts into a clean, organized list, making it easy to extract insights, quotes, or content from various sources in one go. Ideal for researchers, content creators, and marketers looking to quickly analyze or repurpose video content.","[""writing""]",false,true
17908889-b599-4010-8e4f-bed19b8f3446,6e16e65a-ad34-4108-b4fd-4a23fced5ea2,business-ownerceo-finder,Decision Maker Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/1020d94e-b6a2-4fa7-bbdf-2c218b0de563.jpg""]",false,Contact CEOs today,"Find the key decision-makers you need, fast.
This agent identifies business owners or CEOs of local companies in any area you choose. Simply enter what kind of businesses you’re looking for and where, and it will:
* Search the area and gather public information
* Return names, roles, and contact details when available
* Provide smart Google search suggestions if details aren’t found
Perfect for:
* B2B sales teams seeking verified leads
* Recruiters sourcing local talent
* Researchers looking to connect with business leaders
Save hours of manual searching and get straight to the people who matter most.","[""business""]",true,true
72beca1d-45ea-4403-a7ce-e2af168ee428,415b7352-0dc6-4214-9d87-0ad3751b711d,smart-meeting-brief,Smart Meeting Prep,https://youtu.be/9ydZR2hkxaY,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2f116ce1-63ae-4d39-a5cd-f514defc2b97.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0a71a60a-2263-4f12-9836-9c76ab49f155.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/95327695-9184-403c-907a-a9d3bdafa6a5.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2bc77788-790b-47d4-8a61-ce97b695e9f5.png""]",true,Business meeting briefings delivered daily,"Never walk into a meeting unprepared again. Every day at 4 pm, the Smart Meeting Prep Agent scans your calendar for tomorrow's external meetings. It reviews your past email exchanges, researches each participant's background and role, and compiles the insights into a concise briefing, so you can close your workday ready for tomorrow's calls.
How It Works
1. At 4 pm, the agent scans your calendar and identifies external meetings scheduled for the next day.
2. It reviews recent email threads with each participant to surface key relationship history and communication context.
3. It conducts online research to gather publicly available information on roles, company backgrounds, and relevant professional data.
4. It produces a unified briefing for each participant, including past exchange highlights, profile notes, and strategic conversation points.","[""personal""]",true,true
9fa5697a-617b-4fae-aea0-7dbbed279976,b8ceb480-a7a2-4c90-8513-181a49f7071f,automated-support-ai,Automated Support Agent,https://youtu.be/nBMfu_5sgDA,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ed56febc-2205-4179-9e7e-505d8500b66c.png""]",true,Automate up to 80 percent of inbound support emails,"Overview:
Support teams spend countless hours on basic tickets. This agent automates repetitive customer support tasks. It reads incoming requests, researches your knowledge base, and responds automatically when confident. When unsure, it escalates to a human for final resolution.
|
||||
|
||||
How it Works:
|
||||
New support emails are routed to the agent.
|
||||
The agent checks internal documentation for answers.
|
||||
It measures confidence in the answer found and either replies directly or escalates to a human.
|
||||
|
||||
Business Value:
|
||||
Automating the easy 80 percent of support tickets allows your team to focus on high-value, complex customer issues, improving efficiency and response times.","[""business""]",false,true
|
||||
2bdac92b-a12c-4131-bb46-0e3b89f61413,31daf49d-31d3-476b-aa4c-099abc59b458,unspirational-poster-maker,Unspirational Poster Maker,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a490dac-27e5-405f-a4c4-8d1c55b85060.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d343fbb5-478c-4e38-94df-4337293b61f1.jpg""]",false,Because adulting is hard,"This witty AI agent generates hilariously relatable ""motivational"" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clichés and embrace our collective struggles to ""get it together."" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.","[""creative""]",false,true
|
||||
9adf005e-2854-4cc7-98cf-f7103b92a7b7,a03b0d8c-4751-43d6-a54e-c3b7856ba4e3,ai-shortform-video-generator-create-viral-ready-content,AI Video Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/8d2670b9-fea5-4966-a597-0a4511bffdc3.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/aabe8aec-0110-4ce7-a259-4f86fe8fe07d.png""]",false,Create Viral-Ready Shorts Content in Seconds,"OVERVIEW
|
||||
Transform any trending headline or broad topic into a polished, vertical short-form video in a single run.
|
||||
The agent automates research, scriptwriting, metadata creation, and Revid.ai rendering, returning one ready-to-publish MP4 plus its title, script and hashtags.
|
||||
|
||||
HOW IT WORKS
|
||||
1. Input a topic or an exact news headline.
|
||||
2. The agent fetches live search results and selects the most engaging related story.
|
||||
3. Key facts are summarised into concise research notes.
|
||||
4. Claude writes a 30–35 second script with visual cues, a three-second hook, tension loops, and a call-to-action.
|
||||
5. GPT-4o generates an eye-catching title and one or two discoverability hashtags.
|
||||
6. The script is sent to a state-of-the-art AI video generator to render a single 9:16 MP4 (default: 720 p, 30 fps, voice “Brian”, style “movingImage”, music “Bladerunner 2049”).
|
||||
– All voice, style and resolution settings can be adjusted in the Builder before you press ""Run"".
|
||||
7. Output delivered: Title, Script, Hashtags, Video URL.
|
||||
|
||||
KEY USE CASES
|
||||
- Broad-topic explainers (e.g. “Artificial Intelligence” or “Climate Tech”).
|
||||
- Real-time newsjacking with a specific breaking headline.
|
||||
- Product-launch spotlights and quick event recaps while interest is high.
|
||||
|
||||
BUSINESS VALUE
|
||||
- One-click speed: from idea to finished video in minutes.
|
||||
- Consistent brand look: Revid presets keep voice, style and aspect ratio on spec.
|
||||
- No-code workflow: marketers create social video without design or development queues.
|
||||
- Cloud convenience: Auto-GPT Cloud users are pre-configured with all required keys.
|
||||
Self-hosted users simply add OpenAI, Anthropic, Perplexity (OpenRouter/Jina) and Revid keys once.
|
||||
|
||||
IMPORTANT NOTES
|
||||
- The agent outputs exactly one video per execution. Run it again for additional shorts.
|
||||
- Video rendering time varies; AI-generated footage may take several minutes.","[""writing""]",false,true
|
||||
864e48ef-fee5-42c1-b6a4-2ae139db9fc1,55d40473-0f31-4ada-9e40-d3a7139fcbd4,automated-blog-writer,Automated SEO Blog Writer,https://youtu.be/nKcDCbDVobs,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2dd5f95b-5b30-4bf8-a11b-bac776c5141a.jpg""]",true,"Automate research, writing, and publishing for high-ranking blog posts","Scale your blog with a fully automated content engine. The Automated SEO Blog Writer learns your brand voice, finds high-demand keywords, and creates SEO-optimized articles that attract organic traffic and boost visibility.
|
||||
|
||||
How it works:
|
||||
|
||||
1. Share your pitch, website, and values.
|
||||
2. The agent studies your site and uncovers proven SEO opportunities.
|
||||
3. It spends two hours researching and drafting each post.
|
||||
4. You set the cadence—publishing runs on autopilot.
|
||||
|
||||
Business value: Consistently publish research-backed, optimized posts that build domain authority, rankings, and thought leadership while you focus on what matters most.
|
||||
|
||||
Use cases:
|
||||
• Founders: Keep your blog active with no time drain.
|
||||
• Agencies: Deliver scalable SEO content for clients.
|
||||
• Strategists: Automate execution, focus on strategy.
|
||||
• Marketers: Drive steady organic growth.
|
||||
• Local businesses: Capture nearby search traffic.","[""writing""]",false,true
|
||||
6046f42e-eb84-406f-bae0-8e052064a4fa,a548e507-09a7-4b30-909c-f63fcda10fff,lead-finder-local-businesses,Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/abd6605f-d5f8-426b-af36-052e8ba5044f.webp""]",false,Auto-Prospect Like a Pro,"Turbo-charge your local lead generation with the AutoGPT Marketplace’s top Google Maps prospecting agent. “Lead Finder: Local Businesses” delivers verified, ready-to-contact prospects in any niche and city—so you can focus on closing, not searching.
|
||||
|
||||
**WHAT IT DOES**
|
||||
• Searches Google Maps via the official API (no scraping)
|
||||
• Prompts like “dentists in Chicago” or “coffee shops near me”
|
||||
• Returns: Name, Website, Rating, Reviews, **Phone & Address**
|
||||
• Exports instantly to your CRM, sheet, or outreach workflow
|
||||
|
||||
**WHY YOU’LL LOVE IT**
|
||||
✓ Hyper-targeted leads in minutes
|
||||
✓ Unlimited searches & locations
|
||||
✓ Zero CAPTCHAs or IP blocks
|
||||
✓ Works on AutoGPT Cloud or self-hosted (with your API key)
|
||||
✓ Cut prospecting time by 90%
|
||||
|
||||
**PERFECT FOR**
|
||||
— Marketers & PPC agencies
|
||||
— SEO consultants & designers
|
||||
— SaaS founders & sales teams
|
||||
|
||||
Stop scrolling directories—start filling your pipeline. Start now and let AI prospect while you profit.
|
||||
|
||||
→ Click *Add to Library* and own your market today.","[""business""]",true,true
|
||||
f623c862-24e9-44fc-8ce8-d8282bb51ad2,eafa21d3-bf14-4f63-a97f-a5ee41df83b3,linkedin-post-generator,LinkedIn Post Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/297f6a8e-81a8-43e2-b106-c7ad4a5662df.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/fceebdc1-aef6-4000-97fc-4ef587f56bda.png""]",false,Auto‑craft LinkedIn gold,"Create research‑driven, high‑impact LinkedIn posts in minutes. This agent searches YouTube for the best videos on your chosen topic, pulls their transcripts, and distils the most valuable insights into a polished post ready for your company page or personal feed.
|
||||
|
||||
FEATURES
|
||||
• Automated YouTube research – discovers and analyses top‑ranked videos so you don’t have to
|
||||
• AI‑curated synthesis – combines multiple transcripts into one authoritative narrative
|
||||
• Full creative control – adjust style, tone, objective, opinion, clarity, target word count and number of videos
|
||||
• LinkedIn‑optimised output – hook, 2‑3 key points, CTA, strategic line breaks, 3‑5 hashtags, no markdown
|
||||
• One‑click publish – returns a ready‑to‑post text block (≤1 300 characters)
|
||||
|
||||
HOW IT WORKS
|
||||
1. Enter a topic and your preferred writing parameters.
|
||||
2. The agent builds a YouTube search, fetches the page, and extracts the top N video URLs.
|
||||
3. It pulls each transcript, then feeds them—plus your settings—into Claude 3.5 Sonnet.
|
||||
4. The model writes a concise, engaging post designed for maximum LinkedIn engagement.
|
||||
|
||||
USE CASES
|
||||
• Thought‑leadership updates backed by fresh video research
|
||||
• Rapid industry summaries after major events, webinars, or conferences
|
||||
• Consistent LinkedIn content for busy founders, marketers, and creators
|
||||
|
||||
WHY YOU’LL LOVE IT
|
||||
Save hours of manual research, avoid surface‑level hot‑takes, and publish posts that showcase real expertise—without the heavy lift.","[""writing""]",true,true
|
||||
7d4120ad-b6b3-4419-8bdb-7dd7d350ef32,e7bb29a1-23c7-4fee-aa3b-5426174b8c52,youtube-to-linkedin-post-converter,YouTube to LinkedIn Post Converter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f084b326-a708-4396-be51-7ba59ad2ef32.png""]",false,Transform Your YouTube Videos into Engaging LinkedIn Posts with AI,"WHAT IT DOES:
|
||||
This agent converts YouTube video content into a LinkedIn post by analyzing the video's transcript. It provides you with a tailored post that reflects the core ideas, key takeaways, and tone of the original video, optimizing it for engagement on LinkedIn.
|
||||
|
||||
HOW IT WORKS:
|
||||
- You provide the URL to the YouTube video (required)
|
||||
- You can choose the structure for the LinkedIn post (e.g., Personal Achievement Story, Lesson Learned, Thought Leadership, etc.)
|
||||
- You can also select the tone (e.g., Inspirational, Analytical, Conversational, etc.)
|
||||
- The transcript of the video is analyzed by the GPT-4 model and the Claude 3.5 Sonnet model
|
||||
- The models extract key insights, memorable quotes, and the main points from the video
|
||||
- You’ll receive a LinkedIn post, formatted according to your chosen structure and tone, optimized for professional engagement
|
||||
|
||||
INPUTS:
|
||||
- Source YouTube Video – Provide the URL to the YouTube video
|
||||
- Structure – Choose the post format (e.g., Personal Achievement Story, Thought Leadership, etc.)
|
||||
- Content – Specify the main message or idea of the post (e.g., Hot Take, Key Takeaways, etc.)
|
||||
- Tone – Select the tone for the post (e.g., Conversational, Inspirational, etc.)
|
||||
|
||||
OUTPUT:
|
||||
- LinkedIn Post – A well-crafted, AI-generated LinkedIn post with a professional tone, based on the video content and your specified preferences
|
||||
|
||||
Perfect for content creators, marketers, and professionals who want to repurpose YouTube videos for LinkedIn and boost their professional branding.","[""writing""]",false,true
|
||||
c61d6a83-ea48-4df8-b447-3da2d9fe5814,00fdd42c-a14c-4d19-a567-65374ea0e87f,personalized-morning-coffee-newsletter,Personal Newsletter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f4b38e4c-8166-4caf-9411-96c9c4c82d4c.png""]",false,Start your day with personalized AI newsletters that deliver credibility and context for every interest or mood.,"This Personal Newsletter Agent provides a bespoke daily digest on your favorite topics and tone. Whether you prefer industry insights, lighthearted reads, or breaking news, this agent crafts your own unique newsletter to keep you informed and entertained.
|
||||
|
||||
|
||||
How It Works
|
||||
1. Enter your favorite topics, industries, or areas of interest.
|
||||
2. Choose your tone—professional, casual, or humorous.
|
||||
3. Set your preferred delivery cadence: daily or weekly.
|
||||
4. The agent scans top sources and compiles 3–5 engaging stories, insights, and fun facts into a conversational newsletter.
|
||||
|
||||
Skip the morning scroll and enjoy a thoughtfully curated newsletter designed just for you. Stay ahead of trends, spark creative ideas, and enjoy an effortless, informed start to your day.
|
||||
|
||||
|
||||
Use Cases
|
||||
• Executives: Get a daily digest of market updates and leadership insights.
|
||||
• Marketers: Receive curated creative trends and campaign inspiration.
|
||||
• Entrepreneurs: Stay updated on your industry without information overload.","[""research""]",true,true
|
||||
e2e49cfc-4a39-4d62-a6b3-c095f6d025ff,fc2c9976-0962-4625-a27b-d316573a9e7f,email-address-finder,Email Scout - Contact Finder Assistant,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/da8a690a-7a8b-4c1d-b6f8-e2f840c0205d.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a2ac25c-1609-4881-8140-e6da2421afb3.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/26179263-fe06-45bd-b6a0-0754660a0a46.jpg""]",false,Find contact details from name and location using AI search,"Finding someone's professional email address can be time-consuming and frustrating. Manual searching across multiple websites, social profiles, and business directories often leads to dead ends or outdated information.
|
||||
|
||||
Email Scout automates this process by intelligently searching across publicly available sources when you provide a person's name and location. Simply input basic information like ""Tim Cook, USA"" or ""Sarah Smith, London"" and let the AI assistant do the work of finding potential contact details.
|
||||
|
||||
Key Features:
|
||||
- Quick search from just name and location
|
||||
- Scans multiple public sources
|
||||
- Automated AI-powered search process
|
||||
- Easy to use with simple inputs
|
||||
|
||||
Perfect for recruiters, business development professionals, researchers, and anyone needing to establish professional contact.
|
||||
|
||||
Note: This tool searches only publicly available information. Search results depend on what contact information people have made public. Some searches may not yield results if the information isn't publicly accessible.","[""""]",false,true
|
||||
81bcc372-0922-4a36-bc35-f7b1e51d6939,e437cc95-e671-489d-b915-76561fba8c7f,ai-youtube-to-blog-converter,YouTube Video to SEO Blog Writer,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/239e5a41-2515-4e1c-96ef-31d0d37ecbeb.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/c7d96966-786f-4be6-ad7d-3a51c84efc0e.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0275a74c-e2c2-4e29-a6e4-3a616c3c35dd.png""]",false,One link. One click. One powerful blog post.,"Effortlessly transform your YouTube videos into high-quality, SEO-optimized blog posts.
|
||||
|
||||
Your videos deserve a second life—in writing.
|
||||
Make your content work twice as hard by repurposing it into engaging, searchable articles.
|
||||
|
||||
Perfect for content creators, marketers, and bloggers, this tool analyzes video content and generates well-structured blog posts tailored to your tone, audience, and word count. Just paste a YouTube URL and let the AI handle the rest.
|
||||
|
||||
FEATURES
|
||||
|
||||
• CONTENT ANALYSIS
|
||||
Extracts key points from the video while preserving your message and intent.
|
||||
|
||||
• CUSTOMIZABLE OUTPUT
|
||||
Select a tone that fits your audience: casual, professional, educational, or formal.
|
||||
|
||||
• SEO OPTIMIZATION
|
||||
Automatically creates engaging titles and structured subheadings for better search visibility.
|
||||
|
||||
• USER-FRIENDLY
|
||||
Repurpose your videos into written content to expand your reach and improve accessibility.
|
||||
|
||||
Whether you're looking to grow your blog, boost SEO, or simply get more out of your content, the AI YouTube-to-Blog Converter makes it effortless.
|
||||
","[""writing""]",true,true
|
||||
5c3510d2-fc8b-4053-8e19-67f53c86eb1a,f2cc74bb-f43f-4395-9c35-ecb30b5b4fc9,ai-webpage-copy-improver,AI Webpage Copy Improver,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d562d26f-5891-4b09-8859-fbb205972313.jpg""]",false,Boost Your Website's Search Engine Performance,"Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.","[""marketing""]",true,true
|
||||
94d03bd3-7d44-4d47-b60c-edb2f89508d6,b6f6f0d3-49f4-4e3b-8155-ffe9141b32c0,domain-name-finder,Domain Name Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/28545e09-b2b8-4916-b4c6-67f982510a78.jpeg""]",false,Instantly generate brand-ready domain names that are actually available,"Overview:
|
||||
Finding a domain name that fits your brand shouldn’t take hours of searching and failed checks. The Domain Name Finder Agent turns your pitch into hundreds of creative, brand-ready domain ideas—filtered by live availability so every result is actionable.
|
||||
|
||||
How It Works
|
||||
1. Input your product pitch, company name, or core keywords.
|
||||
2. The agent analyzes brand tone, audience, and industry context.
|
||||
3. It generates a list of unique, memorable domains that match your criteria.
|
||||
4. All names are pre-filtered for real-time availability, so you can register immediately.
|
||||
|
||||
|
||||
Business Value
|
||||
Save hours of guesswork and eliminate dead ends. Accelerate brand launches, startup naming, and campaign creation with ready-to-claim domains.
|
||||
|
||||
|
||||
Key Use Cases
|
||||
• Startup Founders: Quickly find brand-ready domains for MVP launches or rebrands.
|
||||
• Marketers: Test name options across campaigns with instant availability data.
|
||||
• Entrepreneurs: Validate ideas faster with instant domain options.","[""business""]",false,true
|
||||
7a831906-daab-426f-9d66-bcf98d869426,516d813b-d1bc-470f-add7-c63a4b2c2bad,ai-function,AI Function,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/620e8117-2ee1-4384-89e6-c2ef4ec3d9c9.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/476259e2-5a79-4a7b-8e70-deeebfca70d7.png""]",false,Never Code Again,"AI FUNCTION MAGIC
|
||||
Your AI‑powered assistant for turning plain‑English descriptions into working Python functions.
|
||||
|
||||
HOW IT WORKS
|
||||
1. Describe what the function should do.
|
||||
2. Specify the inputs it needs.
|
||||
3. Receive the generated Python code.
|
||||
|
||||
FEATURES
|
||||
- Effortless Function Generation: convert natural‑language specs into complete functions.
|
||||
- Customizable Inputs: define the parameters that matter to you.
|
||||
- Versatile Use Cases: simulate data, automate tasks, prototype ideas.
|
||||
- Seamless Integration: add the generated function directly to your codebase.
|
||||
|
||||
EXAMPLE
|
||||
Request: “Create a function that generates 20 examples of fake people, each with a name, date of birth, job title, and age.”
|
||||
Input parameter: number_of_people (default 20)
|
||||
Result: a list of dictionaries such as
|
||||
[
|
||||
{ ""name"": ""Emma Martinez"", ""date_of_birth"": ""1992‑11‑03"", ""job_title"": ""Data Analyst"", ""age"": 32 },
|
||||
{ ""name"": ""Liam O’Connor"", ""date_of_birth"": ""1985‑07‑19"", ""job_title"": ""Marketing Manager"", ""age"": 39 },
|
||||
…18 more entries…
|
||||
]","[""development""]",false,true
|
||||
|
@@ -0,0 +1,590 @@
{
  "id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
  "version": 29,
  "is_active": false,
  "name": "Unspirational Poster Maker",
  "description": "This witty AI agent generates hilariously relatable \"motivational\" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clich\u00e9s and embrace our collective struggles to \"get it together.\" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Generated Image",
        "description": "The resulting generated image ready for you to review and post."
      },
      "metadata": {
        "position": {
          "x": 2329.937006807125,
          "y": 80.49068076698347
        }
      },
      "input_links": [
        {
          "id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
          "source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "20845dda-91de-4508-8077-0504b1a5ae03",
          "source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "6524c611-774b-45e9-899d-9a6aa80c549c",
          "source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "714a0821-e5ba-4af7-9432-50491adda7b1",
          "source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Theme",
        "value": "Cooking"
      },
      "metadata": {
        "position": {
          "x": -1219.5966324967521,
          "y": 80.50339731789956
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
          "source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
          "sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "source_name": "result",
          "sink_name": "prompt_values_#_THEME",
          "is_static": true
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "28bda769-b88b-44c9-be5c-52c2667f137e",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 1132.373897280427,
          "y": 88.44610377514573
        }
      },
      "input_links": [
        {
          "id": "54588c74-e090-4e49-89e4-844b9952a585",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "20845dda-91de-4508-8077-0504b1a5ae03",
          "source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 590.7543882245375,
          "y": 85.69546832466654
        }
      },
      "input_links": [
        {
          "id": "66646786-3006-4417-a6b7-0158f2603d1d",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "6524c611-774b-45e9-899d-9a6aa80c549c",
          "source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "576c5677-9050-4d1c-aad4-36b820c04fef",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 60.48904654237981,
          "y": 86.06183359510214
        }
      },
      "input_links": [
        {
          "id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "714a0821-e5ba-4af7-9432-50491adda7b1",
          "source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "prompt": "A cat sprawled dramatically across an important-looking document during a work-from-home meeting, making direct eye contact with the camera while knocking over a coffee mug in slow motion. Text Overlay: \"Chaos is a career path. Be the obstacle everyone has to work around.\"",
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 1668.3572666956795,
          "y": 89.69665262457966
        }
      },
      "input_links": [
        {
          "id": "509b7587-1940-4a06-808d-edde9a74f400",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
          "source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o",
        "prompt": "<example_output>\nA photo of a sloth lounging on a desk, with its head resting on a keyboard. The keyboard is on top of a laptop with a blank spreadsheet open. A to-do list is placed beside the laptop, with the top item written as \"Do literally anything\". There is a text overlay that says \"If you can't outwork them, outnap them.\".\n</example_output>\n\nCreate a relatable satirical, snarky, user-deprecating motivational style image based on the theme: \"{{THEME}}\".\n\nOutput only the image description and caption, without any additional commentary or formatting.",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": -561.1139207164056,
          "y": 78.60434452403524
        }
      },
      "input_links": [
        {
          "id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
          "source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
          "sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "source_name": "result",
          "sink_name": "prompt_values_#_THEME",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "54588c74-e090-4e49-89e4-844b9952a585",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        },
        {
          "id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        },
        {
          "id": "509b7587-1940-4a06-808d-edde9a74f400",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        },
        {
          "id": "66646786-3006-4417-a6b7-0158f2603d1d",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "66646786-3006-4417-a6b7-0158f2603d1d",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    },
    {
      "id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
      "source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "6524c611-774b-45e9-899d-9a6aa80c549c",
      "source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "20845dda-91de-4508-8077-0504b1a5ae03",
      "source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
      "source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
      "sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "source_name": "result",
      "sink_name": "prompt_values_#_THEME",
      "is_static": true
    },
    {
      "id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    },
    {
      "id": "714a0821-e5ba-4af7-9432-50491adda7b1",
      "source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "54588c74-e090-4e49-89e4-844b9952a585",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    },
    {
      "id": "509b7587-1940-4a06-808d-edde9a74f400",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2024-12-20T19:58:34.390Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Theme": {
        "advanced": false,
        "secret": false,
        "title": "Theme",
        "default": "Cooking"
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "Generated Image": {
        "advanced": false,
        "secret": false,
        "title": "Generated Image",
        "description": "The resulting generated image ready for you to review and post."
      }
    },
    "required": [
      "Generated Image"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "ideogram_api_key_credentials": {
        "credentials_provider": [
          "ideogram"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "ideogram",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.IDEOGRAM: 'ideogram'>], Literal['api_key']]",
        "type": "object",
        "discriminator_values": []
      },
      "openai_api_key_credentials": {
        "credentials_provider": [
          "openai"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "openai",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "gpt-4o"
        ]
      }
    },
    "required": [
      "ideogram_api_key_credentials",
      "openai_api_key_credentials"
    ],
    "title": "UnspirationalPosterMakerCredentialsInputSchema",
    "type": "object"
  }
}
@@ -0,0 +1,447 @@
{
  "id": "622849a7-5848-4838-894d-01f8f07e3fad",
  "version": 18,
  "is_active": true,
  "name": "AI Function",
  "description": "## AI-Powered Function Magic: Never code again!\nProvide a description of a python function and your inputs and AI will provide the results.",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "return",
        "title": null,
        "value": null,
        "format": "",
        "secret": false,
        "advanced": false,
        "description": "The value returned by the function"
      },
      "metadata": {
        "position": {
          "x": 1598.8622921127233,
          "y": 291.59140862204725
        }
      },
      "input_links": [
        {
          "id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
          "source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "o3-mini",
        "retry": 3,
        "prompt": "{{ARGS}}",
        "sys_prompt": "You are now the following python function:\n\n```\n# {{DESCRIPTION}}\n{{FUNCTION}}\n```\n\nThe user will provide your input arguments.\nOnly respond with your `return` value.\nDo not include any commentary or additional text in your response. \nDo not include ``` backticks or any other decorators.",
        "ollama_host": "localhost:11434",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 995,
          "y": 290.50000000000006
        }
      },
      "input_links": [
        {
          "id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
          "source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_FUNCTION",
          "is_static": true
        },
        {
          "id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
          "source_id": "844530de-2354-46d8-b748-67306b7bbca1",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_ARGS",
          "is_static": true
        },
        {
          "id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
          "source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_DESCRIPTION",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
          "source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
      "block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
      "input_default": {
        "name": "Function Definition",
        "title": null,
        "value": "def fake_people(n: int) -> list[dict]:",
        "secret": false,
        "advanced": false,
        "description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
        "placeholder_values": []
      },
      "metadata": {
        "position": {
          "x": -672.6908629664215,
          "y": 302.42044359789116
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
          "source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_FUNCTION",
          "is_static": true
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "844530de-2354-46d8-b748-67306b7bbca1",
      "block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
      "input_default": {
        "name": "Arguments",
        "title": null,
        "value": "20",
        "secret": false,
        "advanced": false,
        "description": "The function's inputs\n\ne.g \"20\"",
        "placeholder_values": []
      },
      "metadata": {
        "position": {
          "x": -158.1623599617334,
          "y": 295.410856928333
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
          "source_id": "844530de-2354-46d8-b748-67306b7bbca1",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_ARGS",
          "is_static": true
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
      "block_id": "90a56ffb-7024-4b2b-ab50-e26c5e5ab8ba",
      "input_default": {
        "name": "Description",
        "title": null,
        "value": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.",
        "secret": false,
        "advanced": false,
        "description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
        "placeholder_values": []
      },
      "metadata": {
        "position": {
          "x": 374.4548658057796,
          "y": 290.3779121974126
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
          "source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_DESCRIPTION",
          "is_static": true
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
      "source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
      "sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
      "source_name": "response",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
      "source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
      "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
      "source_name": "result",
      "sink_name": "prompt_values_#_DESCRIPTION",
      "is_static": true
    },
    {
      "id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
      "source_id": "844530de-2354-46d8-b748-67306b7bbca1",
      "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
      "source_name": "result",
      "sink_name": "prompt_values_#_ARGS",
      "is_static": true
    },
    {
      "id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
      "source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
      "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
      "source_name": "result",
      "sink_name": "prompt_values_#_FUNCTION",
      "is_static": true
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2025-04-19T17:10:48.857Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Function Definition": {
        "advanced": false,
        "anyOf": [
          {
            "format": "short-text",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "secret": false,
        "title": "Function Definition",
        "description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
        "default": "def fake_people(n: int) -> list[dict]:"
      },
      "Arguments": {
        "advanced": false,
        "anyOf": [
          {
            "format": "short-text",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "secret": false,
        "title": "Arguments",
        "description": "The function's inputs\n\ne.g \"20\"",
        "default": "20"
      },
      "Description": {
        "advanced": false,
        "anyOf": [
          {
            "format": "long-text",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "secret": false,
        "title": "Description",
        "description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
        "default": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age."
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "return": {
        "advanced": false,
        "secret": false,
        "title": "return",
        "description": "The value returned by the function"
      }
    },
    "required": [
      "return"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "openai_api_key_credentials": {
        "credentials_provider": [
          "openai"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "openai",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "o3-mini"
        ]
      }
    },
    "required": [
      "openai_api_key_credentials"
    ],
    "title": "AIFunctionCredentialsInputSchema",
    "type": "object"
  }
}
@@ -0,0 +1,403 @@
{
  "id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
  "version": 12,
  "is_active": true,
  "name": "Flux AI Image Generator",
  "description": "Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "7482c59d-725f-4686-82b9-0dfdc4e92316",
      "block_id": "cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
      "input_default": {
        "text": "Press the \"Advanced\" toggle and input your replicate API key.\n\nYou can get one here:\nhttps://replicate.com/account/api-tokens\n"
      },
      "metadata": {
        "position": {
          "x": 872.8268131538296,
          "y": 614.9436919065381
        }
      },
      "input_links": [],
      "output_links": [],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Generated Image"
      },
      "metadata": {
        "position": {
          "x": 1453.6844137728922,
          "y": 963.2466395125115
        }
      },
      "input_links": [
        {
          "id": "06665d23-2f3d-4445-8f22-573446fcff5b",
          "source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Image Subject",
        "value": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks.",
        "description": "The subject of the image"
      },
      "metadata": {
        "position": {
          "x": -314.43009631839783,
          "y": 962.935949165938
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
          "source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
          "sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "source_name": "result",
          "sink_name": "prompt_values_#_TOPIC",
          "is_static": true
        }
      ],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
      "block_id": "90f8c45e-e983-4644-aa0b-b4ebe2f531bc",
      "input_default": {
        "prompt": "dog",
        "output_format": "png",
        "replicate_model_name": "Flux Pro 1.1"
      },
      "metadata": {
        "position": {
          "x": 873.0119949791526,
          "y": 966.1604399052493
        }
      },
      "input_links": [
        {
          "id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
          "source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "06665d23-2f3d-4445-8f22-573446fcff5b",
          "source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o-mini",
        "prompt": "Generate an incredibly detailed, photorealistic image prompt about {{TOPIC}}, describing the camera it's taken with and prompting the diffusion model to use all the best quality techniques.\n\nOutput only the prompt with no additional commentary.",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 277.3057034159709,
          "y": 962.8382498113764
        }
      },
      "input_links": [
        {
          "id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
          "source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
          "sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "source_name": "result",
          "sink_name": "prompt_values_#_TOPIC",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
          "source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
      "source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
      "sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
      "source_name": "result",
      "sink_name": "prompt_values_#_TOPIC",
      "is_static": true
    },
    {
      "id": "06665d23-2f3d-4445-8f22-573446fcff5b",
      "source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
      "sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
      "source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
      "sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2024-12-20T18:46:11.492Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Image Subject": {
        "advanced": false,
        "secret": false,
        "title": "Image Subject",
        "description": "The subject of the image",
        "default": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks."
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "Generated Image": {
        "advanced": false,
        "secret": false,
        "title": "Generated Image"
      }
    },
    "required": [
      "Generated Image"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "replicate_api_key_credentials": {
        "credentials_provider": [
          "replicate"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "replicate",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.REPLICATE: 'replicate'>], Literal['api_key']]",
        "type": "object",
        "discriminator_values": []
      },
      "openai_api_key_credentials": {
        "credentials_provider": [
          "openai"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "openai",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "gpt-4o-mini"
        ]
      }
    },
    "required": [
      "replicate_api_key_credentials",
      "openai_api_key_credentials"
    ],
    "title": "FluxAIImageGeneratorCredentialsInputSchema",
    "type": "object"
  }
}
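The Flux fixture above routes the "Image Subject" input into the LLM block through the `prompt_values_#_TOPIC` sink, which fills the `{{TOPIC}}` placeholder in the block's `prompt`. A minimal sketch of that substitution semantics follows; the block implementation itself is not part of this diff, and `fill_prompt` is a hypothetical helper shown only to illustrate how the graph's links feed the prompt template.

```python
import re


def fill_prompt(template: str, prompt_values: dict[str, str]) -> str:
    """Illustrative sketch: replace {{KEY}} placeholders with linked values.

    This is an assumption about the substitution semantics the fixture relies
    on, not the actual block code (which is not shown in this diff).
    """
    return re.sub(
        r"\{\{(\w+)\}\}",
        lambda m: prompt_values.get(m.group(1), m.group(0)),
        template,
    )


prompt = "Generate an incredibly detailed, photorealistic image prompt about {{TOPIC}}."
print(fill_prompt(prompt, {"TOPIC": "Otto the friendly, purple octopus"}))
```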
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,505 @@
{
  "id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
  "version": 12,
  "is_active": true,
  "name": "AI Webpage Copy Improver",
  "description": "Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Improved Webpage Copy"
      },
      "metadata": {
        "position": {
          "x": 1039.5884372540172,
          "y": -0.8359099621230968
        }
      },
      "input_links": [
        {
          "id": "d4334477-3616-454f-a430-614ca27f5b36",
          "source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Original Page Analysis",
        "description": "Analysis of the webpage as it currently stands."
      },
      "metadata": {
        "position": {
          "x": 1037.7724103954706,
          "y": -606.5934325506903
        }
      },
      "input_links": [
        {
          "id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Homepage URL",
        "value": "https://agpt.co",
        "description": "Enter the URL of the homepage you want to improve"
      },
      "metadata": {
        "position": {
          "x": -1195.1455674454749,
          "y": 0
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "cbb12335-fefd-4560-9fff-98675130fbad",
          "source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
          "sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "source_name": "result",
          "sink_name": "url",
          "is_static": true
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "block_id": "436c3984-57fd-4b85-8e9a-459b356883bd",
      "input_default": {
        "raw_content": false
      },
      "metadata": {
        "position": {
          "x": -631.7330786555249,
          "y": 1.9638396496230826
        }
      },
      "input_links": [
        {
          "id": "cbb12335-fefd-4560-9fff-98675130fbad",
          "source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
          "sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "source_name": "result",
          "sink_name": "url",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "adfa6113-77b3-4e32-b136-3e694b87553e",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        },
        {
          "id": "5d5656fd-4208-4296-bc70-e39cc31caada",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o",
        "prompt": "Current Webpage Content:\n```\n{{CONTENT}}\n```\n\nBased on the following analysis of the webpage content:\n\n```\n{{ANALYSIS}}\n```\n\nRewrite and improve the content to address the identified issues. Focus on:\n1. Enhancing clarity and readability\n2. Optimizing for SEO (suggest and incorporate relevant keywords)\n3. Improving calls-to-action for better conversion rates\n4. Refining the structure and organization\n5. Maintaining brand consistency while improving the overall tone\n\nProvide the improved content in HTML format inside a code-block with \"```\" backticks, preserving the original structure where appropriate. Also, include a brief summary of the changes made and their potential impact.",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 488.37278423303917,
          "y": 0
        }
      },
      "input_links": [
        {
          "id": "adfa6113-77b3-4e32-b136-3e694b87553e",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        },
        {
          "id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "response",
          "sink_name": "prompt_values_#_ANALYSIS",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "d4334477-3616-454f-a430-614ca27f5b36",
          "source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o",
        "prompt": "Analyze the following webpage content and provide a detailed report on its current state, including strengths and weaknesses in terms of clarity, SEO optimization, and potential for conversion:\n\n{{CONTENT}}\n\nInclude observations on:\n1. Overall readability and clarity\n2. Use of keywords and SEO-friendly language\n3. Effectiveness of calls-to-action\n4. Structure and organization of content\n5. Tone and brand consistency",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": -72.66206703605442,
          "y": -0.58403945075381
        }
      },
      "input_links": [
        {
          "id": "5d5656fd-4208-4296-bc70-e39cc31caada",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "response",
          "sink_name": "prompt_values_#_ANALYSIS",
          "is_static": false
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "adfa6113-77b3-4e32-b136-3e694b87553e",
      "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "source_name": "content",
      "sink_name": "prompt_values_#_CONTENT",
      "is_static": false
    },
    {
      "id": "d4334477-3616-454f-a430-614ca27f5b36",
      "source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
      "source_name": "response",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "5d5656fd-4208-4296-bc70-e39cc31caada",
      "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "source_name": "content",
      "sink_name": "prompt_values_#_CONTENT",
      "is_static": false
    },
    {
      "id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
      "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
      "source_name": "response",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
      "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "source_name": "response",
      "sink_name": "prompt_values_#_ANALYSIS",
      "is_static": false
    },
    {
      "id": "cbb12335-fefd-4560-9fff-98675130fbad",
      "source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
      "sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "source_name": "result",
      "sink_name": "url",
      "is_static": true
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2024-12-20T19:47:22.036Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Homepage URL": {
        "advanced": false,
        "secret": false,
        "title": "Homepage URL",
        "description": "Enter the URL of the homepage you want to improve",
        "default": "https://agpt.co"
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "Improved Webpage Copy": {
        "advanced": false,
        "secret": false,
        "title": "Improved Webpage Copy"
      },
      "Original Page Analysis": {
        "advanced": false,
        "secret": false,
        "title": "Original Page Analysis",
        "description": "Analysis of the webpage as it currently stands."
      }
    },
    "required": [
      "Improved Webpage Copy",
      "Original Page Analysis"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "jina_api_key_credentials": {
        "credentials_provider": [
          "jina"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "jina",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
        "type": "object",
        "discriminator_values": []
      },
      "openai_api_key_credentials": {
        "credentials_provider": [
          "openai"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "openai",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "gpt-4o"
        ]
      }
    },
    "required": [
      "jina_api_key_credentials",
      "openai_api_key_credentials"
    ],
    "title": "AIWebpageCopyImproverCredentialsInputSchema",
    "type": "object"
  }
}
@@ -0,0 +1,615 @@
{
  "id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
  "version": 29,
  "is_active": true,
  "name": "Email Address Finder",
  "description": "Input information of a business and find their email address",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "04cad535-9f1a-4876-8b07-af5897d8c282",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Address",
        "value": "USA"
      },
      "metadata": {
        "position": {
          "x": 1047.9357219838776,
          "y": 1067.9123910370954
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
          "source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_ADDRESS",
          "is_static": true
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "block_id": "3146e4fe-2cdd-4f29-bd12-0c9d5bb4deb0",
      "input_default": {
        "group": 1,
        "pattern": "<email>(.*?)<\\/email>"
      },
      "metadata": {
        "position": {
          "x": 3381.2821481740634,
          "y": 246.091098184158
        }
      },
      "input_links": [
        {
          "id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
          "source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "source_name": "response",
          "sink_name": "text",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "b15b5143-27b7-486e-a166-4095e72e5235",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "source_name": "negative",
          "sink_name": "values_#_Result",
          "is_static": false
        },
        {
          "id": "23591872-3c6b-4562-87d3-5b6ade698e48",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "positive",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Email"
      },
      "metadata": {
        "position": {
          "x": 4525.4246310882,
          "y": 246.36913665010354
        }
      },
      "input_links": [
        {
          "id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
          "source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "output",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "23591872-3c6b-4562-87d3-5b6ade698e48",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "positive",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "4a41df99-ffe2-4c12-b528-632979c9c030",
      "block_id": "87840993-2053-44b7-8da4-187ad4ee518c",
      "input_default": {},
      "metadata": {
        "position": {
          "x": 2182.7499999999995,
          "y": 242.00001144409185
        }
      },
      "input_links": [
        {
          "id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
          "source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "source_name": "output",
          "sink_name": "query",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
          "source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "results",
          "sink_name": "prompt_values_#_WEBSITE_CONTENT",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Business Name",
        "value": "Tim Cook"
      },
      "metadata": {
        "position": {
          "x": 1049.9704155272595,
          "y": 244.49931152418344
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "946b522c-365f-4ee0-96f9-28863d9882ea",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_NAME",
          "is_static": true
        },
        {
          "id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "result",
          "sink_name": "prompt_values_#_BUSINESS_NAME",
          "is_static": true
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
      "input_default": {
        "format": "Email Address of {{NAME}}, {{ADDRESS}}",
        "values": {}
      },
      "metadata": {
        "position": {
          "x": 1625.25,
          "y": 243.25001144409185
        }
      },
      "input_links": [
        {
          "id": "946b522c-365f-4ee0-96f9-28863d9882ea",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_NAME",
          "is_static": true
        },
        {
          "id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
          "source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_ADDRESS",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
          "source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "source_name": "output",
          "sink_name": "query",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "266b7255-11c4-4b88-99e2-85db31a2e865",
      "block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
      "input_default": {
        "format": "Failed to find email. \nResult:\n{{RESULT}}",
        "values": {}
      },
      "metadata": {
        "position": {
          "x": 3949.7493830805934,
          "y": 705.209819698647
        }
      },
      "input_links": [
        {
          "id": "b15b5143-27b7-486e-a166-4095e72e5235",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "source_name": "negative",
          "sink_name": "values_#_Result",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
          "source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "output",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "claude-sonnet-4-5-20250929",
        "prompt": "<business_website>\n{{WEBSITE_CONTENT}}\n</business_website>\n\nExtract the Contact Email of {{BUSINESS_NAME}}.\n\nIf no email that can be used to contact {{BUSINESS_NAME}} is present, output `N/A`.\nDo not share any emails other than the email for this specific entity.\n\nIf multiple present pick the likely best one.\n\nRespond with the email (or N/A) inside <email></email> tags.\n\nExample Response:\n\n<thoughts_or_comments>\nThere were many emails present, but luckily one was for {{BUSINESS_NAME}} which I have included below.\n</thoughts_or_comments>\n<email>\nexample@email.com\n</email>",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 2774.879259081777,
          "y": 243.3102035752969
        }
      },
      "input_links": [
        {
          "id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "result",
          "sink_name": "prompt_values_#_BUSINESS_NAME",
          "is_static": true
        },
        {
          "id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
          "source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "results",
          "sink_name": "prompt_values_#_WEBSITE_CONTENT",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
          "source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "source_name": "response",
          "sink_name": "text",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
      "source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "source_name": "response",
      "sink_name": "text",
      "is_static": false
    },
    {
      "id": "b15b5143-27b7-486e-a166-4095e72e5235",
      "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
      "source_name": "negative",
      "sink_name": "values_#_Result",
      "is_static": false
    },
    {
      "id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
      "source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
      "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
      "source_name": "output",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "946b522c-365f-4ee0-96f9-28863d9882ea",
      "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
      "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "source_name": "result",
      "sink_name": "values_#_NAME",
      "is_static": true
    },
    {
      "id": "23591872-3c6b-4562-87d3-5b6ade698e48",
      "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
      "source_name": "positive",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
      "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
      "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "source_name": "result",
      "sink_name": "prompt_values_#_BUSINESS_NAME",
      "is_static": true
    },
    {
      "id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
      "source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
      "source_name": "output",
      "sink_name": "query",
      "is_static": false
    },
    {
      "id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
      "source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
      "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "source_name": "result",
      "sink_name": "values_#_ADDRESS",
      "is_static": true
    },
    {
      "id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
      "source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
      "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "source_name": "results",
      "sink_name": "prompt_values_#_WEBSITE_CONTENT",
      "is_static": false
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2025-01-03T00:46:30.244Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Address": {
        "advanced": false,
        "secret": false,
        "title": "Address",
        "default": "USA"
      },
      "Business Name": {
        "advanced": false,
        "secret": false,
        "title": "Business Name",
        "default": "Tim Cook"
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "Email": {
        "advanced": false,
        "secret": false,
        "title": "Email"
      }
    },
    "required": [
      "Email"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "jina_api_key_credentials": {
        "credentials_provider": [
          "jina"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "jina",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
        "type": "object",
        "discriminator_values": []
      },
      "anthropic_api_key_credentials": {
        "credentials_provider": [
          "anthropic"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "anthropic",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.ANTHROPIC: 'anthropic'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "claude-sonnet-4-5-20250929"
        ]
      }
    },
    "required": [
      "jina_api_key_credentials",
      "anthropic_api_key_credentials"
    ],
    "title": "EmailAddressFinderCredentialsInputSchema",
    "type": "object"
  }
}
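The Email Address Finder graph above depends on the LLM replying inside `<email></email>` tags and on the pattern-matching block's regex `<email>(.*?)<\/email>` with `group: 1`. A minimal sketch of that extraction, assuming DOTALL-style matching so the group can span the newlines around the address (the actual block's regex flags are not shown in this diff):

```python
import re

# The fixture's pattern, applied to a response in the prompt's example format.
# re.DOTALL is an assumption here; without it, `.*?` would not cross newlines.
response = (
    "<thoughts_or_comments>\nFound a likely address.\n</thoughts_or_comments>\n"
    "<email>\nexample@email.com\n</email>"
)
match = re.search(r"<email>(.*?)</email>", response, re.DOTALL)
print(match.group(1).strip() if match else "no match")  # -> example@email.com
```

When no match is found, the block's `negative` output carries the raw text into the "Failed to find email" template, which is why the graph still produces an `Email` output either way.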
@@ -1,13 +1,14 @@
import asyncio
from typing import Dict, Set

from fastapi import WebSocket

from backend.api.model import NotificationPayload, WSMessage, WSMethod
from backend.data.execution import (
    ExecutionEventType,
    GraphExecutionEvent,
    NodeExecutionEvent,
)
from backend.server.model import WSMessage, WSMethod

_EVENT_TYPE_TO_METHOD_MAP: dict[ExecutionEventType, WSMethod] = {
    ExecutionEventType.GRAPH_EXEC_UPDATE: WSMethod.GRAPH_EXECUTION_EVENT,
@@ -19,15 +20,24 @@ class ConnectionManager:
    def __init__(self):
        self.active_connections: Set[WebSocket] = set()
        self.subscriptions: Dict[str, Set[WebSocket]] = {}
        self.user_connections: Dict[str, Set[WebSocket]] = {}

    async def connect_socket(self, websocket: WebSocket):
    async def connect_socket(self, websocket: WebSocket, *, user_id: str):
        await websocket.accept()
        self.active_connections.add(websocket)
        if user_id not in self.user_connections:
            self.user_connections[user_id] = set()
        self.user_connections[user_id].add(websocket)

    def disconnect_socket(self, websocket: WebSocket):
        self.active_connections.remove(websocket)
    def disconnect_socket(self, websocket: WebSocket, *, user_id: str):
        self.active_connections.discard(websocket)
        for subscribers in self.subscriptions.values():
            subscribers.discard(websocket)
        user_conns = self.user_connections.get(user_id)
        if user_conns is not None:
            user_conns.discard(websocket)
            if not user_conns:
                self.user_connections.pop(user_id, None)

    async def subscribe_graph_exec(
        self, *, user_id: str, graph_exec_id: str, websocket: WebSocket
@@ -92,6 +102,26 @@ class ConnectionManager:

        return n_sent

    async def send_notification(
        self, *, user_id: str, payload: NotificationPayload
    ) -> int:
        """Send a notification to all websocket connections belonging to a user."""
        message = WSMessage(
            method=WSMethod.NOTIFICATION,
            data=payload.model_dump(),
        ).model_dump_json()

        connections = tuple(self.user_connections.get(user_id, set()))
        if not connections:
            return 0

        await asyncio.gather(
            *(connection.send_text(message) for connection in connections),
            return_exceptions=True,
        )

        return len(connections)

    async def _subscribe(self, channel_key: str, websocket: WebSocket) -> str:
        if channel_key not in self.subscriptions:
            self.subscriptions[channel_key] = set()
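For context on the user-scoped API introduced above, here is a hypothetical wiring sketch (not part of this diff; the route path and the way `user_id` is obtained are illustrative assumptions):

```python
from fastapi import FastAPI, WebSocket, WebSocketDisconnect

from backend.api.conn_manager import ConnectionManager  # module path per this commit

app = FastAPI()
manager = ConnectionManager()


@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket, user_id: str):
    # Register the socket under its user so send_notification() can reach it.
    await manager.connect_socket(websocket, user_id=user_id)
    try:
        while True:
            await websocket.receive_text()  # keep the connection alive
    except WebSocketDisconnect:
        # Removes the socket from active_connections, all subscriptions, and
        # user_connections; an emptied user entry is pruned from the dict.
        manager.disconnect_socket(websocket, user_id=user_id)
```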
@@ -4,13 +4,13 @@ from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket

from backend.api.conn_manager import ConnectionManager
from backend.api.model import NotificationPayload, WSMessage, WSMethod
from backend.data.execution import (
    ExecutionStatus,
    GraphExecutionEvent,
    NodeExecutionEvent,
)
from backend.server.conn_manager import ConnectionManager
from backend.server.model import WSMessage, WSMethod


@pytest.fixture
@@ -29,8 +29,9 @@ def mock_websocket() -> AsyncMock:
async def test_connect(
    connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
    await connection_manager.connect_socket(mock_websocket)
    await connection_manager.connect_socket(mock_websocket, user_id="user-1")
    assert mock_websocket in connection_manager.active_connections
    assert mock_websocket in connection_manager.user_connections["user-1"]
    mock_websocket.accept.assert_called_once()


@@ -39,11 +40,13 @@ def test_disconnect(
) -> None:
    connection_manager.active_connections.add(mock_websocket)
    connection_manager.subscriptions["test_channel_42"] = {mock_websocket}
    connection_manager.user_connections["user-1"] = {mock_websocket}

    connection_manager.disconnect_socket(mock_websocket)
    connection_manager.disconnect_socket(mock_websocket, user_id="user-1")

    assert mock_websocket not in connection_manager.active_connections
    assert mock_websocket not in connection_manager.subscriptions["test_channel_42"]
    assert "user-1" not in connection_manager.user_connections


@pytest.mark.asyncio
@@ -207,3 +210,22 @@ async def test_send_execution_result_no_subscribers(
    await connection_manager.send_execution_update(result)

    mock_websocket.send_text.assert_not_called()


@pytest.mark.asyncio
async def test_send_notification(
    connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
    connection_manager.user_connections["user-1"] = {mock_websocket}

    await connection_manager.send_notification(
        user_id="user-1", payload=NotificationPayload(type="info", event="hey")
    )

    mock_websocket.send_text.assert_called_once()
    sent_message = mock_websocket.send_text.call_args[0][0]
    expected_message = WSMessage(
        method=WSMethod.NOTIFICATION,
        data={"type": "info", "event": "hey"},
    ).model_dump_json()
    assert sent_message == expected_message
@@ -1,23 +1,23 @@
from fastapi import FastAPI

from backend.api.middleware.security import SecurityHeadersMiddleware
from backend.monitoring.instrumentation import instrument_fastapi
from backend.server.middleware.security import SecurityHeadersMiddleware

from .routes.v1 import v1_router
from .v1.routes import v1_router

external_app = FastAPI(
external_api = FastAPI(
    title="AutoGPT External API",
    description="External API for AutoGPT integrations",
    docs_url="/docs",
    version="1.0",
)

external_app.add_middleware(SecurityHeadersMiddleware)
external_app.include_router(v1_router, prefix="/v1")
external_api.add_middleware(SecurityHeadersMiddleware)
external_api.include_router(v1_router, prefix="/v1")

# Add Prometheus instrumentation
instrument_fastapi(
    external_app,
    external_api,
    service_name="external-api",
    expose_endpoint=True,
    endpoint="/metrics",
107
autogpt_platform/backend/backend/api/external/middleware.py
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
from fastapi import HTTPException, Security, status
|
||||
from fastapi.security import APIKeyHeader, HTTPAuthorizationCredentials, HTTPBearer
|
||||
from prisma.enums import APIKeyPermission
|
||||
|
||||
from backend.data.auth.api_key import APIKeyInfo, validate_api_key
|
||||
from backend.data.auth.base import APIAuthorizationInfo
|
||||
from backend.data.auth.oauth import (
|
||||
InvalidClientError,
|
||||
InvalidTokenError,
|
||||
OAuthAccessTokenInfo,
|
||||
validate_access_token,
|
||||
)
|
||||
|
||||
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
|
||||
bearer_auth = HTTPBearer(auto_error=False)
|
||||
|
||||
|
||||
async def require_api_key(api_key: str | None = Security(api_key_header)) -> APIKeyInfo:
|
||||
"""Middleware for API key authentication only"""
|
||||
if api_key is None:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED, detail="Missing API key"
|
||||
)
|
||||
|
||||
api_key_obj = await validate_api_key(api_key)
|
||||
|
||||
if not api_key_obj:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key"
|
||||
)
|
||||
|
||||
return api_key_obj
|
||||
|
||||
|
||||
async def require_access_token(
|
||||
bearer: HTTPAuthorizationCredentials | None = Security(bearer_auth),
|
||||
) -> OAuthAccessTokenInfo:
|
||||
"""Middleware for OAuth access token authentication only"""
|
||||
if bearer is None:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Missing Authorization header",
|
||||
)
|
||||
|
||||
try:
|
||||
token_info, _ = await validate_access_token(bearer.credentials)
|
||||
except (InvalidClientError, InvalidTokenError) as e:
|
||||
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e))
|
||||
|
||||
return token_info
|
||||
|
||||
|
||||
async def require_auth(
|
||||
api_key: str | None = Security(api_key_header),
|
||||
bearer: HTTPAuthorizationCredentials | None = Security(bearer_auth),
|
||||
) -> APIAuthorizationInfo:
|
||||
"""
|
||||
Unified authentication middleware supporting both API keys and OAuth tokens.
|
||||
|
||||
Supports two authentication methods, which are checked in order:
|
||||
1. X-API-Key header (existing API key authentication)
|
||||
2. Authorization: Bearer <token> header (OAuth access token)
|
||||
|
||||
Returns:
|
||||
APIAuthorizationInfo: base class of both APIKeyInfo and OAuthAccessTokenInfo.
|
||||
"""
|
||||
# Try API key first
|
||||
if api_key is not None:
|
||||
api_key_info = await validate_api_key(api_key)
|
||||
if api_key_info:
|
||||
return api_key_info
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key"
|
||||
)
|
||||
|
||||
# Try OAuth bearer token
|
||||
if bearer is not None:
|
||||
try:
|
||||
token_info, _ = await validate_access_token(bearer.credentials)
|
||||
return token_info
|
||||
except (InvalidClientError, InvalidTokenError) as e:
|
||||
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e))
|
||||
|
||||
# No credentials provided
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Missing authentication. Provide API key or access token.",
|
||||
)
|
||||
|
||||
|
||||
def require_permission(permission: APIKeyPermission):
|
||||
"""
|
||||
Dependency function for checking specific permissions
|
||||
(works with API keys and OAuth tokens)
|
||||
"""
|
||||
|
||||
async def check_permission(
|
||||
auth: APIAuthorizationInfo = Security(require_auth),
|
||||
) -> APIAuthorizationInfo:
|
||||
if permission not in auth.scopes:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail=f"Missing required permission: {permission.value}",
|
||||
)
|
||||
return auth
|
||||
|
||||
return check_permission
|
||||
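A minimal sketch of how this middleware is wired into a route; the pattern and the `IDENTITY` permission mirror `routes.py` later in this diff, while the router itself is illustrative:

```python
from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission

from backend.api.external.middleware import require_permission
from backend.data.auth.base import APIAuthorizationInfo

example_router = APIRouter()


@example_router.get("/whoami")
async def whoami(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.IDENTITY)
    ),
) -> dict[str, str]:
    # Either a valid X-API-Key header or an OAuth bearer token satisfies
    # require_auth; the wrapper then checks the IDENTITY scope.
    return {"user_id": auth.user_id}
```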
655
autogpt_platform/backend/backend/api/external/v1/integrations.py
vendored
Normal file
@@ -0,0 +1,655 @@
"""
External API endpoints for integrations and credentials.

This module provides endpoints for external applications (like Autopilot) to:
- Initiate OAuth flows with custom callback URLs
- Complete OAuth flows by exchanging authorization codes
- Create API key, user/password, and host-scoped credentials
- List and manage user credentials
"""

import logging
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Union
from urllib.parse import urlparse

from fastapi import APIRouter, Body, HTTPException, Path, Security, status
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field, SecretStr

from backend.api.external.middleware import require_permission
from backend.api.features.integrations.models import get_all_provider_names
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.model import (
    APIKeyCredentials,
    Credentials,
    CredentialsType,
    HostScopedCredentials,
    OAuth2Credentials,
    UserPasswordCredentials,
)
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
from backend.integrations.providers import ProviderName
from backend.util.settings import Settings

if TYPE_CHECKING:
    from backend.integrations.oauth import BaseOAuthHandler

logger = logging.getLogger(__name__)
settings = Settings()
creds_manager = IntegrationCredentialsManager()

integrations_router = APIRouter(prefix="/integrations", tags=["integrations"])


# ==================== Request/Response Models ==================== #


class OAuthInitiateRequest(BaseModel):
    """Request model for initiating an OAuth flow."""

    callback_url: str = Field(
        ..., description="The external app's callback URL for OAuth redirect"
    )
    scopes: list[str] = Field(
        default_factory=list, description="OAuth scopes to request"
    )
    state_metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Arbitrary metadata to echo back on completion",
    )


class OAuthInitiateResponse(BaseModel):
    """Response model for OAuth initiation."""

    login_url: str = Field(..., description="URL to redirect user for OAuth consent")
    state_token: str = Field(..., description="State token for CSRF protection")
    expires_at: int = Field(
        ..., description="Unix timestamp when the state token expires"
    )


class OAuthCompleteRequest(BaseModel):
    """Request model for completing an OAuth flow."""

    code: str = Field(..., description="Authorization code from OAuth provider")
    state_token: str = Field(..., description="State token from initiate request")


class OAuthCompleteResponse(BaseModel):
    """Response model for OAuth completion."""

    credentials_id: str = Field(..., description="ID of the stored credentials")
    provider: str = Field(..., description="Provider name")
    type: str = Field(..., description="Credential type (oauth2)")
    title: Optional[str] = Field(None, description="Credential title")
    scopes: list[str] = Field(default_factory=list, description="Granted scopes")
    username: Optional[str] = Field(None, description="Username from provider")
    state_metadata: dict[str, Any] = Field(
        default_factory=dict, description="Echoed metadata from initiate request"
    )


class CredentialSummary(BaseModel):
    """Summary of a credential without sensitive data."""

    id: str
    provider: str
    type: CredentialsType
    title: Optional[str] = None
    scopes: Optional[list[str]] = None
    username: Optional[str] = None
    host: Optional[str] = None


class ProviderInfo(BaseModel):
    """Information about an integration provider."""

    name: str
    supports_oauth: bool = False
    supports_api_key: bool = False
    supports_user_password: bool = False
    supports_host_scoped: bool = False
    default_scopes: list[str] = Field(default_factory=list)


# ==================== Credential Creation Models ==================== #


class CreateAPIKeyCredentialRequest(BaseModel):
    """Request model for creating API key credentials."""

    type: Literal["api_key"] = "api_key"
    api_key: str = Field(..., description="The API key")
    title: str = Field(..., description="A name for this credential")
    expires_at: Optional[int] = Field(
        None, description="Unix timestamp when the API key expires"
    )


class CreateUserPasswordCredentialRequest(BaseModel):
    """Request model for creating username/password credentials."""

    type: Literal["user_password"] = "user_password"
    username: str = Field(..., description="Username")
    password: str = Field(..., description="Password")
    title: str = Field(..., description="A name for this credential")


class CreateHostScopedCredentialRequest(BaseModel):
    """Request model for creating host-scoped credentials."""

    type: Literal["host_scoped"] = "host_scoped"
    host: str = Field(..., description="Host/domain pattern to match")
    headers: dict[str, str] = Field(..., description="Headers to include in requests")
    title: str = Field(..., description="A name for this credential")


# Union type for credential creation
CreateCredentialRequest = Annotated[
    CreateAPIKeyCredentialRequest
    | CreateUserPasswordCredentialRequest
    | CreateHostScopedCredentialRequest,
    Field(discriminator="type"),
]


class CreateCredentialResponse(BaseModel):
    """Response model for credential creation."""

    id: str
    provider: str
    type: CredentialsType
    title: Optional[str] = None

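For reference, an example request body matching `OAuthInitiateRequest` above; the URL and scope values are illustrative:

```python
# Example OAuthInitiateRequest body; callback_url must be on an allowed origin.
initiate_payload = {
    "callback_url": "https://example-app.test/oauth/callback",  # placeholder
    "scopes": ["repo", "read:user"],  # provider-specific, illustrative values
    "state_metadata": {"session": "abc123"},  # echoed back on completion
}
```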
# ==================== Helper Functions ==================== #


def validate_callback_url(callback_url: str) -> bool:
    """Validate that the callback URL is from an allowed origin."""
    allowed_origins = settings.config.external_oauth_callback_origins

    try:
        parsed = urlparse(callback_url)
        callback_origin = f"{parsed.scheme}://{parsed.netloc}"

        for allowed in allowed_origins:
            # Simple origin matching
            if callback_origin == allowed:
                return True

        # Allow localhost with any port in development (proper hostname check)
        if parsed.hostname == "localhost":
            for allowed in allowed_origins:
                allowed_parsed = urlparse(allowed)
                if allowed_parsed.hostname == "localhost":
                    return True

        return False
    except Exception:
        return False


def _get_oauth_handler_for_external(
    provider_name: str, redirect_uri: str
) -> "BaseOAuthHandler":
    """Get an OAuth handler configured with an external redirect URI."""
    # Ensure blocks are loaded so SDK providers are available
    try:
        from backend.blocks import load_all_blocks

        load_all_blocks()
    except Exception as e:
        logger.warning(f"Failed to load blocks: {e}")

    if provider_name not in HANDLERS_BY_NAME:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Provider '{provider_name}' does not support OAuth",
        )

    # Check if this provider has custom OAuth credentials
    oauth_credentials = CREDENTIALS_BY_PROVIDER.get(provider_name)

    if oauth_credentials and not oauth_credentials.use_secrets:
        import os

        client_id = (
            os.getenv(oauth_credentials.client_id_env_var)
            if oauth_credentials.client_id_env_var
            else None
        )
        client_secret = (
            os.getenv(oauth_credentials.client_secret_env_var)
            if oauth_credentials.client_secret_env_var
            else None
        )
    else:
        client_id = getattr(settings.secrets, f"{provider_name}_client_id", None)
        client_secret = getattr(
            settings.secrets, f"{provider_name}_client_secret", None
        )

    if not (client_id and client_secret):
        logger.error(f"Attempt to use unconfigured {provider_name} OAuth integration")
        raise HTTPException(
            status_code=status.HTTP_501_NOT_IMPLEMENTED,
            detail={
                "message": f"Integration with provider '{provider_name}' is not configured.",
                "hint": "Set client ID and secret in the application's deployment environment",
            },
        )

    handler_class = HANDLERS_BY_NAME[provider_name]
    return handler_class(
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=redirect_uri,
    )

# ==================== Endpoints ==================== #


@integrations_router.get("/providers", response_model=list[ProviderInfo])
async def list_providers(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> list[ProviderInfo]:
    """
    List all available integration providers.

    Returns a list of all providers with their supported credential types.
    Most providers support API key credentials, and some also support OAuth.
    """
    # Ensure blocks are loaded
    try:
        from backend.blocks import load_all_blocks

        load_all_blocks()
    except Exception as e:
        logger.warning(f"Failed to load blocks: {e}")

    from backend.sdk.registry import AutoRegistry

    providers = []
    for name in get_all_provider_names():
        supports_oauth = name in HANDLERS_BY_NAME
        handler_class = HANDLERS_BY_NAME.get(name)
        default_scopes = (
            getattr(handler_class, "DEFAULT_SCOPES", []) if handler_class else []
        )

        # Check if provider has specific auth types from SDK registration
        sdk_provider = AutoRegistry.get_provider(name)
        if sdk_provider and sdk_provider.supported_auth_types:
            supports_api_key = "api_key" in sdk_provider.supported_auth_types
            supports_user_password = (
                "user_password" in sdk_provider.supported_auth_types
            )
            supports_host_scoped = "host_scoped" in sdk_provider.supported_auth_types
        else:
            # Fallback for legacy providers
            supports_api_key = True  # All providers can accept API keys
            supports_user_password = name in ("smtp",)
            supports_host_scoped = name == "http"

        providers.append(
            ProviderInfo(
                name=name,
                supports_oauth=supports_oauth,
                supports_api_key=supports_api_key,
                supports_user_password=supports_user_password,
                supports_host_scoped=supports_host_scoped,
                default_scopes=default_scopes,
            )
        )

    return providers


@integrations_router.post(
    "/{provider}/oauth/initiate",
    response_model=OAuthInitiateResponse,
    summary="Initiate OAuth flow",
)
async def initiate_oauth(
    provider: Annotated[str, Path(title="The OAuth provider")],
    request: OAuthInitiateRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
    ),
) -> OAuthInitiateResponse:
    """
    Initiate an OAuth flow for an external application.

    This endpoint allows external apps to start an OAuth flow with a custom
    callback URL. The callback URL must be from an allowed origin configured
    in the platform settings.

    Returns a login URL to redirect the user to, along with a state token
    for CSRF protection.
    """
    # Validate callback URL
    if not validate_callback_url(request.callback_url):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=(
                f"Callback URL origin is not allowed. "
                f"Allowed origins: {settings.config.external_oauth_callback_origins}"
            ),
        )

    # Validate provider
    try:
        provider_name = ProviderName(provider)
    except ValueError:
        # Check if it's a dynamically registered provider
        if provider not in HANDLERS_BY_NAME:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Provider '{provider}' not found",
            )
        provider_name = provider

    # Get OAuth handler with external callback URL
    handler = _get_oauth_handler_for_external(
        provider if isinstance(provider_name, str) else provider_name.value,
        request.callback_url,
    )

    # Store state token with external flow metadata
    # Note: initiated_by_api_key_id is only available for API key auth, not OAuth
    api_key_id = getattr(auth, "id", None) if auth.type == "api_key" else None
    state_token, code_challenge = await creds_manager.store.store_state_token(
        user_id=auth.user_id,
        provider=provider if isinstance(provider_name, str) else provider_name.value,
        scopes=request.scopes,
        callback_url=request.callback_url,
        state_metadata=request.state_metadata,
        initiated_by_api_key_id=api_key_id,
    )

    # Build login URL
    login_url = handler.get_login_url(
        request.scopes, state_token, code_challenge=code_challenge
    )

    # Calculate expiration (10 minutes from now)
    from datetime import datetime, timedelta, timezone

    expires_at = int((datetime.now(timezone.utc) + timedelta(minutes=10)).timestamp())

    return OAuthInitiateResponse(
        login_url=login_url,
        state_token=state_token,
        expires_at=expires_at,
    )


@integrations_router.post(
    "/{provider}/oauth/complete",
    response_model=OAuthCompleteResponse,
    summary="Complete OAuth flow",
)
async def complete_oauth(
    provider: Annotated[str, Path(title="The OAuth provider")],
    request: OAuthCompleteRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
    ),
) -> OAuthCompleteResponse:
    """
    Complete an OAuth flow by exchanging the authorization code for tokens.

    This endpoint should be called after the user has authorized the application
    and been redirected back to the external app's callback URL with an
    authorization code.
    """
    # Verify state token
    valid_state = await creds_manager.store.verify_state_token(
        auth.user_id, request.state_token, provider
    )

    if not valid_state:
        logger.warning(f"Invalid or expired state token for provider {provider}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid or expired state token",
        )

    # Verify this is an external flow (callback_url must be set)
    if not valid_state.callback_url:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="State token was not created for external OAuth flow",
        )

    # Get OAuth handler with the original callback URL
    handler = _get_oauth_handler_for_external(provider, valid_state.callback_url)

    try:
        scopes = valid_state.scopes
        scopes = handler.handle_default_scopes(scopes)

        credentials = await handler.exchange_code_for_tokens(
            request.code, scopes, valid_state.code_verifier
        )

        # Handle Linear's space-separated scopes
        if len(credentials.scopes) == 1 and " " in credentials.scopes[0]:
            credentials.scopes = credentials.scopes[0].split(" ")

        # Check scope mismatch
        if not set(scopes).issubset(set(credentials.scopes)):
            logger.warning(
                f"Granted scopes {credentials.scopes} for provider {provider} "
                f"do not include all requested scopes {scopes}"
            )

    except Exception as e:
        logger.error(f"OAuth2 Code->Token exchange failed for provider {provider}: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"OAuth2 callback failed to exchange code for tokens: {str(e)}",
        )

    # Store credentials
    await creds_manager.create(auth.user_id, credentials)

    logger.info(f"Successfully completed external OAuth for provider {provider}")

    return OAuthCompleteResponse(
        credentials_id=credentials.id,
        provider=credentials.provider,
        type=credentials.type,
        title=credentials.title,
        scopes=credentials.scopes,
        username=credentials.username,
        state_metadata=valid_state.state_metadata,
    )

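Taken together, the two endpoints above form a two-step flow. A sketch from the client's perspective, assuming the router is mounted at `/v1` as in `routes.py` below; the base URL and API key are placeholders:

```python
import httpx

BASE = "https://platform.example.com/api/external/v1"  # assumed mount point
HEADERS = {"X-API-Key": "<your-api-key>"}


def run_external_oauth_flow(provider: str, callback_url: str) -> str:
    with httpx.Client(base_url=BASE, headers=HEADERS) as client:
        # Step 1: initiate, then redirect the user to login_url.
        init = client.post(
            f"/integrations/{provider}/oauth/initiate",
            json={"callback_url": callback_url, "scopes": [], "state_metadata": {}},
        ).json()
        print("Send the user to:", init["login_url"])

        # Step 2: after the provider redirects back with ?code=...,
        # exchange the code. "<code-from-redirect>" is a placeholder.
        done = client.post(
            f"/integrations/{provider}/oauth/complete",
            json={"code": "<code-from-redirect>", "state_token": init["state_token"]},
        ).json()
        return done["credentials_id"]
```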
@integrations_router.get("/credentials", response_model=list[CredentialSummary])
async def list_credentials(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> list[CredentialSummary]:
    """
    List all credentials for the authenticated user.

    Returns metadata about each credential without exposing sensitive tokens.
    """
    credentials = await creds_manager.store.get_all_creds(auth.user_id)
    return [
        CredentialSummary(
            id=cred.id,
            provider=cred.provider,
            type=cred.type,
            title=cred.title,
            scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
            username=cred.username if isinstance(cred, OAuth2Credentials) else None,
            host=cred.host if isinstance(cred, HostScopedCredentials) else None,
        )
        for cred in credentials
    ]


@integrations_router.get(
    "/{provider}/credentials", response_model=list[CredentialSummary]
)
async def list_credentials_by_provider(
    provider: Annotated[str, Path(title="The provider to list credentials for")],
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> list[CredentialSummary]:
    """
    List credentials for a specific provider.
    """
    credentials = await creds_manager.store.get_creds_by_provider(
        auth.user_id, provider
    )
    return [
        CredentialSummary(
            id=cred.id,
            provider=cred.provider,
            type=cred.type,
            title=cred.title,
            scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
            username=cred.username if isinstance(cred, OAuth2Credentials) else None,
            host=cred.host if isinstance(cred, HostScopedCredentials) else None,
        )
        for cred in credentials
    ]


@integrations_router.post(
    "/{provider}/credentials",
    response_model=CreateCredentialResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Create credentials",
)
async def create_credential(
    provider: Annotated[str, Path(title="The provider to create credentials for")],
    request: Union[
        CreateAPIKeyCredentialRequest,
        CreateUserPasswordCredentialRequest,
        CreateHostScopedCredentialRequest,
    ] = Body(..., discriminator="type"),
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
    ),
) -> CreateCredentialResponse:
    """
    Create non-OAuth credentials for a provider.

    Supports creating:
    - API key credentials (type: "api_key")
    - Username/password credentials (type: "user_password")
    - Host-scoped credentials (type: "host_scoped")

    For OAuth credentials, use the OAuth initiate/complete flow instead.
    """
    # Validate provider exists
    all_providers = get_all_provider_names()
    if provider not in all_providers:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Provider '{provider}' not found",
        )

    # Create the appropriate credential type
    credentials: Credentials
    if request.type == "api_key":
        credentials = APIKeyCredentials(
            provider=provider,
            api_key=SecretStr(request.api_key),
            title=request.title,
            expires_at=request.expires_at,
        )
    elif request.type == "user_password":
        credentials = UserPasswordCredentials(
            provider=provider,
            username=SecretStr(request.username),
            password=SecretStr(request.password),
            title=request.title,
        )
    elif request.type == "host_scoped":
        # Convert string headers to SecretStr
        secret_headers = {k: SecretStr(v) for k, v in request.headers.items()}
        credentials = HostScopedCredentials(
            provider=provider,
            host=request.host,
            headers=secret_headers,
            title=request.title,
        )
    else:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Unsupported credential type: {request.type}",
        )

    # Store credentials
    try:
        await creds_manager.create(auth.user_id, credentials)
    except Exception as e:
        logger.error(f"Failed to store credentials: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to store credentials: {str(e)}",
        )

    logger.info(f"Created {request.type} credentials for provider {provider}")

    return CreateCredentialResponse(
        id=credentials.id,
        provider=provider,
        type=credentials.type,
        title=credentials.title,
    )


class DeleteCredentialResponse(BaseModel):
    """Response model for deleting a credential."""

    deleted: bool = Field(..., description="Whether the credential was deleted")
    credentials_id: str = Field(..., description="ID of the deleted credential")


@integrations_router.delete(
    "/{provider}/credentials/{cred_id}",
    response_model=DeleteCredentialResponse,
)
async def delete_credential(
    provider: Annotated[str, Path(title="The provider")],
    cred_id: Annotated[str, Path(title="The credential ID to delete")],
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.DELETE_INTEGRATIONS)
    ),
) -> DeleteCredentialResponse:
    """
    Delete a credential.

    Note: This does not revoke the tokens with the provider. For full cleanup,
    use the main API's delete endpoint which handles webhook cleanup and
    token revocation.
    """
    creds = await creds_manager.store.get_creds_by_id(auth.user_id, cred_id)
    if not creds:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Credentials not found"
        )
    if creds.provider != provider:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Credentials do not match the specified provider",
        )

    await creds_manager.delete(auth.user_id, cred_id)

    return DeleteCredentialResponse(deleted=True, credentials_id=cred_id)
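Example bodies for the discriminated-union `create_credential` endpoint above; the `type` field selects which request model is validated, and the key values are illustrative:

```python
# POST either dict to /v1/integrations/{provider}/credentials.
api_key_payload = {
    "type": "api_key",
    "api_key": "sk-...",  # placeholder secret
    "title": "My provider key",
    "expires_at": None,
}

host_scoped_payload = {
    "type": "host_scoped",
    "host": "api.internal.example.com",  # host pattern to match
    "headers": {"Authorization": "Bearer <token>"},  # stored as SecretStr
    "title": "Internal API",
}
```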
328
autogpt_platform/backend/backend/api/external/v1/routes.py
vendored
Normal file
@@ -0,0 +1,328 @@
import logging
import urllib.parse
from collections import defaultdict
from typing import Annotated, Any, Literal, Optional, Sequence

from fastapi import APIRouter, Body, HTTPException, Security
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from pydantic import BaseModel, Field
from typing_extensions import TypedDict

import backend.api.features.store.cache as store_cache
import backend.api.features.store.model as store_model
import backend.data.block
from backend.api.external.middleware import require_permission
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data import user as user_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.executor.utils import add_graph_execution
from backend.util.settings import Settings

from .integrations import integrations_router
from .tools import tools_router

settings = Settings()
logger = logging.getLogger(__name__)

v1_router = APIRouter()

v1_router.include_router(integrations_router)
v1_router.include_router(tools_router)


class UserInfoResponse(BaseModel):
    id: str
    name: Optional[str]
    email: str
    timezone: str = Field(
        description="The user's last known timezone (e.g. 'Europe/Amsterdam'), "
        "or 'not-set' if not set"
    )


@v1_router.get(
    path="/me",
    tags=["user", "meta"],
)
async def get_user_info(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.IDENTITY)
    ),
) -> UserInfoResponse:
    user = await user_db.get_user_by_id(auth.user_id)

    return UserInfoResponse(
        id=user.id,
        name=user.name,
        email=user.email,
        timezone=user.timezone,
    )


@v1_router.get(
    path="/blocks",
    tags=["blocks"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
)
async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
    blocks = [block() for block in backend.data.block.get_blocks().values()]
    return [b.to_dict() for b in blocks if not b.disabled]


@v1_router.post(
    path="/blocks/{block_id}/execute",
    tags=["blocks"],
    dependencies=[Security(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
async def execute_graph_block(
    block_id: str,
    data: BlockInput,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.EXECUTE_BLOCK)
    ),
) -> CompletedBlockOutput:
    obj = backend.data.block.get_block(block_id)
    if not obj:
        raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")

    output = defaultdict(list)
    async for name, data in obj.execute(data):
        output[name].append(data)
    return output


@v1_router.post(
    path="/graphs/{graph_id}/execute/{graph_version}",
    tags=["graphs"],
)
async def execute_graph(
    graph_id: str,
    graph_version: int,
    node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.EXECUTE_GRAPH)
    ),
) -> dict[str, Any]:
    try:
        graph_exec = await add_graph_execution(
            graph_id=graph_id,
            user_id=auth.user_id,
            inputs=node_input,
            graph_version=graph_version,
        )
        return {"id": graph_exec.id}
    except Exception as e:
        msg = str(e).encode().decode("unicode_escape")
        raise HTTPException(status_code=400, detail=msg)

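A sketch of triggering a graph run through the endpoint above; IDs and the base URL are placeholders, and the request body is keyed by `node_input` because of `Body(..., embed=True)`:

```python
import httpx

resp = httpx.post(
    "https://platform.example.com/api/external/v1/graphs/<graph-id>/execute/1",
    headers={"X-API-Key": "<your-api-key>"},
    json={"node_input": {"topic": "hello"}},  # embed=True wraps inputs in a key
)
print(resp.json())  # {"id": "<graph-exec-id>"} on success
```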
class ExecutionNode(TypedDict):
    node_id: str
    input: Any
    output: dict[str, Any]


class GraphExecutionResult(TypedDict):
    execution_id: str
    status: str
    nodes: list[ExecutionNode]
    output: Optional[list[dict[str, str]]]


@v1_router.get(
    path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
    tags=["graphs"],
)
async def get_graph_execution_results(
    graph_id: str,
    graph_exec_id: str,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.READ_GRAPH)
    ),
) -> GraphExecutionResult:
    graph_exec = await execution_db.get_graph_execution(
        user_id=auth.user_id,
        execution_id=graph_exec_id,
        include_node_executions=True,
    )
    if not graph_exec:
        raise HTTPException(
            status_code=404, detail=f"Graph execution #{graph_exec_id} not found."
        )

    if not await graph_db.get_graph(
        graph_id=graph_exec.graph_id,
        version=graph_exec.graph_version,
        user_id=auth.user_id,
    ):
        raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")

    return GraphExecutionResult(
        execution_id=graph_exec_id,
        status=graph_exec.status.value,
        nodes=[
            ExecutionNode(
                node_id=node_exec.node_id,
                input=node_exec.input_data.get("value", node_exec.input_data),
                output={k: v for k, v in node_exec.output_data.items()},
            )
            for node_exec in graph_exec.node_executions
        ],
        output=(
            [
                {name: value}
                for name, values in graph_exec.outputs.items()
                for value in values
            ]
            if graph_exec.status == AgentExecutionStatus.COMPLETED
            else None
        ),
    )

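Since `output` is only populated once the run is finished, clients typically poll this endpoint. A small sketch; the terminal status strings are an assumption inferred from the `AgentExecutionStatus` values used elsewhere in this diff:

```python
import time

import httpx


def wait_for_results(client: httpx.Client, graph_id: str, exec_id: str) -> dict:
    """Poll the results endpoint until the execution reaches a terminal state."""
    while True:
        result = client.get(
            f"/graphs/{graph_id}/executions/{exec_id}/results"
        ).json()
        # Terminal statuses assumed to serialize as their enum names.
        if result["status"] in ("COMPLETED", "FAILED", "TERMINATED"):
            return result  # "output" is only populated when COMPLETED
        time.sleep(2)
```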
##############################################
############### Store Endpoints ##############
##############################################


@v1_router.get(
    path="/store/agents",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.StoreAgentsResponse,
)
async def get_store_agents(
    featured: bool = False,
    creator: str | None = None,
    sorted_by: Literal["rating", "runs", "name", "updated_at"] | None = None,
    search_query: str | None = None,
    category: str | None = None,
    page: int = 1,
    page_size: int = 20,
) -> store_model.StoreAgentsResponse:
    """
    Get a paginated list of agents from the store with optional filtering and sorting.

    Args:
        featured: Filter to only show featured agents
        creator: Filter agents by creator username
        sorted_by: Sort agents by "runs", "rating", "name", or "updated_at"
        search_query: Search agents by name, subheading and description
        category: Filter agents by category
        page: Page number for pagination (default 1)
        page_size: Number of agents per page (default 20)

    Returns:
        StoreAgentsResponse: Paginated list of agents matching the filters
    """
    if page < 1:
        raise HTTPException(status_code=422, detail="Page must be greater than 0")

    if page_size < 1:
        raise HTTPException(status_code=422, detail="Page size must be greater than 0")

    agents = await store_cache._get_cached_store_agents(
        featured=featured,
        creator=creator,
        sorted_by=sorted_by,
        search_query=search_query,
        category=category,
        page=page,
        page_size=page_size,
    )
    return agents


@v1_router.get(
    path="/store/agents/{username}/{agent_name}",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.StoreAgentDetails,
)
async def get_store_agent(
    username: str,
    agent_name: str,
) -> store_model.StoreAgentDetails:
    """
    Get details of a specific store agent by username and agent name.

    Args:
        username: Creator's username
        agent_name: Name/slug of the agent

    Returns:
        StoreAgentDetails: Detailed information about the agent
    """
    username = urllib.parse.unquote(username).lower()
    agent_name = urllib.parse.unquote(agent_name).lower()
    agent = await store_cache._get_cached_agent_details(
        username=username, agent_name=agent_name
    )
    return agent


@v1_router.get(
    path="/store/creators",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.CreatorsResponse,
)
async def get_store_creators(
    featured: bool = False,
    search_query: str | None = None,
    sorted_by: Literal["agent_rating", "agent_runs", "num_agents"] | None = None,
    page: int = 1,
    page_size: int = 20,
) -> store_model.CreatorsResponse:
    """
    Get a paginated list of store creators with optional filtering and sorting.

    Args:
        featured: Filter to only show featured creators
        search_query: Search creators by profile description
        sorted_by: Sort by "agent_rating", "agent_runs", or "num_agents"
        page: Page number for pagination (default 1)
        page_size: Number of creators per page (default 20)

    Returns:
        CreatorsResponse: Paginated list of creators matching the filters
    """
    if page < 1:
        raise HTTPException(status_code=422, detail="Page must be greater than 0")

    if page_size < 1:
        raise HTTPException(status_code=422, detail="Page size must be greater than 0")

    creators = await store_cache._get_cached_store_creators(
        featured=featured,
        search_query=search_query,
        sorted_by=sorted_by,
        page=page,
        page_size=page_size,
    )
    return creators


@v1_router.get(
    path="/store/creators/{username}",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.CreatorDetails,
)
async def get_store_creator(
    username: str,
) -> store_model.CreatorDetails:
    """
    Get details of a specific store creator by username.

    Args:
        username: Creator's username

    Returns:
        CreatorDetails: Detailed information about the creator
    """
    username = urllib.parse.unquote(username).lower()
    creator = await store_cache._get_cached_creator_details(username=username)
    return creator
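Querying the store endpoints above is a plain authenticated GET; parameters mirror `get_store_agents`, and the base URL is a placeholder:

```python
import httpx

agents = httpx.get(
    "https://platform.example.com/api/external/v1/store/agents",  # assumed base URL
    headers={"X-API-Key": "<your-api-key>"},
    params={"featured": True, "sorted_by": "rating", "page": 1, "page_size": 5},
).json()
```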
152
autogpt_platform/backend/backend/api/external/v1/tools.py
vendored
Normal file
@@ -0,0 +1,152 @@
"""External API routes for chat tools - stateless HTTP endpoints.

Note: These endpoints use ephemeral sessions that are not persisted to Redis.
As a result, session-based rate limiting (max_agent_runs, max_agent_schedules)
is not enforced for external API calls. Each request creates a fresh session
with zeroed counters. Rate limiting for external API consumers should be
handled separately (e.g., via API key quotas).
"""

import logging
from typing import Any

from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field

from backend.api.external.middleware import require_permission
from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools import find_agent_tool, run_agent_tool
from backend.api.features.chat.tools.models import ToolResponseBase
from backend.data.auth.base import APIAuthorizationInfo

logger = logging.getLogger(__name__)

tools_router = APIRouter(prefix="/tools", tags=["tools"])

# Note: We use Security() as a function parameter dependency (auth: APIAuthorizationInfo = Security(...))
# rather than in the decorator's dependencies= list. This avoids duplicate permission checks
# while still enforcing auth AND giving us access to auth for extracting user_id.


# Request models
class FindAgentRequest(BaseModel):
    query: str = Field(..., description="Search query for finding agents")


class RunAgentRequest(BaseModel):
    """Request to run or schedule an agent.

    The tool automatically handles the setup flow:
    - First call returns available inputs so user can decide what values to use
    - Returns missing credentials if user needs to configure them
    - Executes when inputs are provided OR use_defaults=true
    - Schedules execution if schedule_name and cron are provided
    """

    username_agent_slug: str = Field(
        ...,
        description="The marketplace agent slug (e.g., 'username/agent-name')",
    )
    inputs: dict[str, Any] = Field(
        default_factory=dict,
        description="Dictionary of input values for the agent",
    )
    use_defaults: bool = Field(
        default=False,
        description="Set to true to run with default values (user must confirm)",
    )
    schedule_name: str | None = Field(
        None,
        description="Name for scheduled execution (triggers scheduling mode)",
    )
    cron: str | None = Field(
        None,
        description="Cron expression (5 fields: minute hour day month weekday)",
    )
    timezone: str = Field(
        default="UTC",
        description="IANA timezone (e.g., 'America/New_York', 'UTC')",
    )


def _create_ephemeral_session(user_id: str | None) -> ChatSession:
    """Create an ephemeral session for stateless API requests."""
    return ChatSession.new(user_id)


@tools_router.post(
    path="/find-agent",
)
async def find_agent(
    request: FindAgentRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.USE_TOOLS)
    ),
) -> dict[str, Any]:
    """
    Search for agents in the marketplace based on capabilities and user needs.

    Args:
        request: Search query for finding agents

    Returns:
        List of matching agents or no results response
    """
    session = _create_ephemeral_session(auth.user_id)
    result = await find_agent_tool._execute(
        user_id=auth.user_id,
        session=session,
        query=request.query,
    )
    return _response_to_dict(result)


@tools_router.post(
    path="/run-agent",
)
async def run_agent(
    request: RunAgentRequest,
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.USE_TOOLS)
    ),
) -> dict[str, Any]:
    """
    Run or schedule an agent from the marketplace.

    The endpoint automatically handles the setup flow:
    - Returns missing inputs if required fields are not provided
    - Returns missing credentials if user needs to configure them
    - Executes immediately if all requirements are met
    - Schedules execution if schedule_name and cron are provided

    For scheduled execution:
    - Cron format: "minute hour day month weekday"
    - Examples: "0 9 * * 1-5" (9am weekdays), "0 0 * * *" (daily at midnight)
    - Timezone: Use IANA timezone names like "America/New_York"

    Args:
        request: Agent slug, inputs, and optional schedule config

    Returns:
        - setup_requirements: If inputs or credentials are missing
        - execution_started: If agent was run or scheduled successfully
        - error: If something went wrong
    """
    session = _create_ephemeral_session(auth.user_id)
    result = await run_agent_tool._execute(
        user_id=auth.user_id,
        session=session,
        username_agent_slug=request.username_agent_slug,
        inputs=request.inputs,
        use_defaults=request.use_defaults,
        schedule_name=request.schedule_name or "",
        cron=request.cron or "",
        timezone=request.timezone,
    )
    return _response_to_dict(result)


def _response_to_dict(result: ToolResponseBase) -> dict[str, Any]:
    """Convert a tool response to a dictionary for JSON serialization."""
    return result.model_dump()
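An example `RunAgentRequest` body that puts the tool above into scheduling mode, per its docstring: `schedule_name` plus a 5-field cron expression evaluated in the given timezone. The slug is illustrative:

```python
# POST to /v1/tools/run-agent with this body to create the schedule.
scheduled_run = {
    "username_agent_slug": "someuser/daily-digest",  # illustrative slug
    "inputs": {},
    "use_defaults": True,
    "schedule_name": "weekday-morning-digest",
    "cron": "0 9 * * 1-5",  # 09:00 Monday through Friday
    "timezone": "America/New_York",
}
```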
@@ -6,9 +6,10 @@ from fastapi import APIRouter, Body, Security
from prisma.enums import CreditTransactionType

from backend.data.credit import admin_get_user_history, get_user_credit_model
from backend.server.v2.admin.model import AddUserCreditsResponse, UserHistoryResponse
from backend.util.json import SafeJson

from .model import AddUserCreditsResponse, UserHistoryResponse

logger = logging.getLogger(__name__)


@@ -9,14 +9,15 @@ import pytest_mock
from autogpt_libs.auth.jwt_utils import get_jwt_payload
from pytest_snapshot.plugin import Snapshot

import backend.server.v2.admin.credit_admin_routes as credit_admin_routes
import backend.server.v2.admin.model as admin_model
from backend.data.model import UserTransaction
from backend.util.json import SafeJson
from backend.util.models import Pagination

from .credit_admin_routes import router as credit_admin_router
from .model import UserHistoryResponse

app = fastapi.FastAPI()
app.include_router(credit_admin_routes.router)
app.include_router(credit_admin_router)

client = fastapi.testclient.TestClient(app)

@@ -30,7 +31,7 @@ def setup_app_admin_auth(mock_jwt_admin):


def test_add_user_credits_success(
    mocker: pytest_mock.MockFixture,
    mocker: pytest_mock.MockerFixture,
    configured_snapshot: Snapshot,
    admin_user_id: str,
    target_user_id: str,
@@ -42,7 +43,7 @@ def test_add_user_credits_success(
        return_value=(1500, "transaction-123-uuid")
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.get_user_credit_model",
        "backend.api.features.admin.credit_admin_routes.get_user_credit_model",
        return_value=mock_credit_model,
    )

@@ -84,7 +85,7 @@ def test_add_user_credits_success(


def test_add_user_credits_negative_amount(
    mocker: pytest_mock.MockFixture,
    mocker: pytest_mock.MockerFixture,
    snapshot: Snapshot,
) -> None:
    """Test credit deduction by admin (negative amount)"""
@@ -94,7 +95,7 @@ def test_add_user_credits_negative_amount(
        return_value=(200, "transaction-456-uuid")
    )
    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.get_user_credit_model",
        "backend.api.features.admin.credit_admin_routes.get_user_credit_model",
        return_value=mock_credit_model,
    )

@@ -119,12 +120,12 @@ def test_add_user_credits_negative_amount(


def test_get_user_history_success(
    mocker: pytest_mock.MockFixture,
    mocker: pytest_mock.MockerFixture,
    snapshot: Snapshot,
) -> None:
    """Test successful retrieval of user credit history"""
    # Mock the admin_get_user_history function
    mock_history_response = admin_model.UserHistoryResponse(
    mock_history_response = UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-1",
@@ -150,7 +151,7 @@ def test_get_user_history_success(
    )

    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        "backend.api.features.admin.credit_admin_routes.admin_get_user_history",
        return_value=mock_history_response,
    )

@@ -170,12 +171,12 @@ def test_get_user_history_success(


def test_get_user_history_with_filters(
    mocker: pytest_mock.MockFixture,
    mocker: pytest_mock.MockerFixture,
    snapshot: Snapshot,
) -> None:
    """Test user credit history with search and filter parameters"""
    # Mock the admin_get_user_history function
    mock_history_response = admin_model.UserHistoryResponse(
    mock_history_response = UserHistoryResponse(
        history=[
            UserTransaction(
                user_id="user-3",
@@ -194,7 +195,7 @@ def test_get_user_history_with_filters(
    )

    mock_get_history = mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        "backend.api.features.admin.credit_admin_routes.admin_get_user_history",
        return_value=mock_history_response,
    )

@@ -230,12 +231,12 @@ def test_get_user_history_with_filters(


def test_get_user_history_empty_results(
    mocker: pytest_mock.MockFixture,
    mocker: pytest_mock.MockerFixture,
    snapshot: Snapshot,
) -> None:
    """Test user credit history with no results"""
    # Mock empty history response
    mock_history_response = admin_model.UserHistoryResponse(
    mock_history_response = UserHistoryResponse(
        history=[],
        pagination=Pagination(
            total_items=0,
@@ -246,7 +247,7 @@ def test_get_user_history_empty_results(
    )

    mocker.patch(
        "backend.server.v2.admin.credit_admin_routes.admin_get_user_history",
        "backend.api.features.admin.credit_admin_routes.admin_get_user_history",
        return_value=mock_history_response,
    )

@@ -0,0 +1,474 @@
import asyncio
import logging
from datetime import datetime
from typing import Optional

from autogpt_libs.auth import get_user_id, requires_admin_user
from fastapi import APIRouter, HTTPException, Security
from pydantic import BaseModel, Field

from backend.blocks.llm import LlmModel
from backend.data.analytics import (
    AccuracyTrendsResponse,
    get_accuracy_trends_and_alerts,
)
from backend.data.execution import (
    ExecutionStatus,
    GraphExecutionMeta,
    get_graph_executions,
    update_graph_execution_stats,
)
from backend.data.model import GraphExecutionStats
from backend.executor.activity_status_generator import (
    DEFAULT_SYSTEM_PROMPT,
    DEFAULT_USER_PROMPT,
    generate_activity_status_for_execution,
)
from backend.executor.manager import get_db_async_client
from backend.util.settings import Settings

logger = logging.getLogger(__name__)


class ExecutionAnalyticsRequest(BaseModel):
    graph_id: str = Field(..., description="Graph ID to analyze")
    graph_version: Optional[int] = Field(None, description="Optional graph version")
    user_id: Optional[str] = Field(None, description="Optional user ID filter")
    created_after: Optional[datetime] = Field(
        None, description="Optional created date lower bound"
    )
    model_name: str = Field("gpt-4o-mini", description="Model to use for generation")
    batch_size: int = Field(
        10, description="Batch size for concurrent processing", le=25, ge=1
    )
    system_prompt: Optional[str] = Field(
        None, description="Custom system prompt (default: built-in prompt)"
    )
    user_prompt: Optional[str] = Field(
        None,
        description="Custom user prompt with {{GRAPH_NAME}} and {{EXECUTION_DATA}} placeholders (default: built-in prompt)",
    )
    skip_existing: bool = Field(
        True,
        description="Whether to skip executions that already have activity status and correctness score",
    )


class ExecutionAnalyticsResult(BaseModel):
    agent_id: str
    version_id: int
    user_id: str
    exec_id: str
    summary_text: Optional[str]
    score: Optional[float]
    status: str  # "success", "failed", "skipped"
    error_message: Optional[str] = None


class ExecutionAnalyticsResponse(BaseModel):
    total_executions: int
    processed_executions: int
    successful_analytics: int
    failed_analytics: int
    skipped_executions: int
    results: list[ExecutionAnalyticsResult]


class ModelInfo(BaseModel):
    value: str
    label: str
    provider: str


class ExecutionAnalyticsConfig(BaseModel):
    available_models: list[ModelInfo]
    default_system_prompt: str
    default_user_prompt: str
    recommended_model: str


class AccuracyTrendsRequest(BaseModel):
    graph_id: str = Field(..., description="Graph ID to analyze", min_length=1)
    user_id: Optional[str] = Field(None, description="Optional user ID filter")
    days_back: int = Field(30, description="Number of days to look back", ge=7, le=90)
    drop_threshold: float = Field(
        10.0, description="Alert threshold percentage", ge=1.0, le=50.0
    )
    include_historical: bool = Field(
        False, description="Include historical data for charts"
    )


router = APIRouter(
    prefix="/admin",
    tags=["admin", "execution_analytics"],
    dependencies=[Security(requires_admin_user)],
)


@router.get(
    "/execution_analytics/config",
    response_model=ExecutionAnalyticsConfig,
    summary="Get Execution Analytics Configuration",
)
async def get_execution_analytics_config(
    admin_user_id: str = Security(get_user_id),
):
    """
    Get the configuration for execution analytics including:
    - Available AI models with metadata
    - Default system and user prompts
    - Recommended model selection
    """
    logger.info(f"Admin user {admin_user_id} requesting execution analytics config")

    # Generate model list from LlmModel enum with provider information
    available_models = []

    # Function to generate friendly display names from model values
    def generate_model_label(model: LlmModel) -> str:
        """Generate a user-friendly label from the model enum value."""
        value = model.value

        # For all models, convert underscores/hyphens to spaces and title case
        # e.g., "gpt-4-turbo" -> "GPT 4 Turbo", "claude-3-haiku-20240307" -> "Claude 3 Haiku"
        parts = value.replace("_", "-").split("-")

        # Handle provider prefixes (e.g., "google/", "x-ai/")
        if "/" in value:
            _, model_name = value.split("/", 1)
            parts = model_name.replace("_", "-").split("-")

        # Capitalize and format parts
        formatted_parts = []
        for part in parts:
            # Skip date-like patterns - check for various date formats:
            # - Long dates like "20240307" (8 digits)
            # - Year components like "2024", "2025" (4 digit years >= 2020)
            # - Month/day components like "04", "16" when they appear to be dates
            if part.isdigit():
                if len(part) >= 8:  # Long date format like "20240307"
                    continue
                elif len(part) == 4 and int(part) >= 2020:  # Year like "2024", "2025"
                    continue
                elif len(part) <= 2 and int(part) <= 31:  # Month/day like "04", "16"
                    # Skip if this looks like a date component (basic heuristic)
                    continue
            # Keep version numbers as-is
            if part.replace(".", "").isdigit():
                formatted_parts.append(part)
            # Capitalize normal words
            else:
                formatted_parts.append(
                    part.upper()
                    if part.upper() in ["GPT", "LLM", "API", "V0"]
                    else part.capitalize()
                )

        model_name = " ".join(formatted_parts)

        # Format provider name for better display
        provider_name = model.provider.replace("_", " ").title()

        # Return with provider prefix for clarity
        return f"{provider_name}: {model_name}"

    # Include all LlmModel values (no more filtering by hardcoded list)
    recommended_model = LlmModel.GPT4O_MINI.value
    for model in LlmModel:
        label = generate_model_label(model)
        # Add "(Recommended)" suffix to the recommended model
        if model.value == recommended_model:
            label += " (Recommended)"

        available_models.append(
            ModelInfo(
                value=model.value,
                label=label,
                provider=model.provider,
            )
        )

    # Sort models by provider and name for better UX
    available_models.sort(key=lambda x: (x.provider, x.label))

    return ExecutionAnalyticsConfig(
        available_models=available_models,
        default_system_prompt=DEFAULT_SYSTEM_PROMPT,
        default_user_prompt=DEFAULT_USER_PROMPT,
        recommended_model=recommended_model,
    )

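Worked examples of the `generate_model_label` heuristic above, to make the date-stripping and prefix rules concrete; the exact outputs are assumptions since the provider segment comes from `LlmModel.provider`:

```python
# Expected input -> label mappings (values are assumptions):
examples = {
    "gpt-4-turbo": "Openai: GPT 4 Turbo",
    "claude-3-haiku-20240307": "Anthropic: Claude 3 Haiku",  # trailing date dropped
    "google/gemini-2.5-pro": "Google: Gemini 2.5 Pro",  # provider prefix stripped
}
for value, label in examples.items():
    print(f"{value!r} -> {label!r}")
```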
@router.post(
    "/execution_analytics",
    response_model=ExecutionAnalyticsResponse,
    summary="Generate Execution Analytics",
)
async def generate_execution_analytics(
    request: ExecutionAnalyticsRequest,
    admin_user_id: str = Security(get_user_id),
):
    """
    Generate activity summaries and correctness scores for graph executions.

    This endpoint:
    1. Fetches all completed executions matching the criteria
    2. Identifies executions missing activity_status or correctness_score
    3. Generates missing data using AI in batches
    4. Updates the database with new stats
    5. Returns a detailed report of the analytics operation
    """
    logger.info(
        f"Admin user {admin_user_id} starting execution analytics generation for graph {request.graph_id}"
    )

    try:
        # Validate model configuration
        settings = Settings()
        if not settings.secrets.openai_internal_api_key:
            raise HTTPException(status_code=500, detail="OpenAI API key not configured")

        # Get database client
        db_client = get_db_async_client()

        # Fetch executions to process
        executions = await get_graph_executions(
            graph_id=request.graph_id,
            graph_version=request.graph_version,
            user_id=request.user_id,
            created_time_gte=request.created_after,
            statuses=[
                ExecutionStatus.COMPLETED,
                ExecutionStatus.FAILED,
                ExecutionStatus.TERMINATED,
            ],  # Only process finished executions
        )

        logger.info(
            f"Found {len(executions)} total executions for graph {request.graph_id}"
        )

        # Filter executions that need analytics generation
        executions_to_process = []
        for execution in executions:
            # Skip if we should skip existing analytics and both activity_status and correctness_score exist
            if (
                request.skip_existing
                and execution.stats
                and execution.stats.activity_status
                and execution.stats.correctness_score is not None
            ):
                continue

            # Add execution to processing list
            executions_to_process.append(execution)

        logger.info(
            f"Found {len(executions_to_process)} executions needing analytics generation"
        )

        # Create results for ALL executions - processed and skipped
        results = []
        successful_count = 0
        failed_count = 0

        # Process executions that need analytics generation
        if executions_to_process:
            total_batches = len(
                range(0, len(executions_to_process), request.batch_size)
            )

            for batch_idx, i in enumerate(
                range(0, len(executions_to_process), request.batch_size)
            ):
                batch = executions_to_process[i : i + request.batch_size]
                logger.info(
                    f"Processing batch {batch_idx + 1}/{total_batches} with {len(batch)} executions"
                )

                batch_results = await _process_batch(batch, request, db_client)

                for result in batch_results:
                    results.append(result)
                    if result.status == "success":
                        successful_count += 1
                    elif result.status == "failed":
                        failed_count += 1

                # Small delay between batches to avoid overwhelming the LLM API
                if batch_idx < total_batches - 1:  # Don't delay after the last batch
                    await asyncio.sleep(2)

        # Add ALL executions to results (both processed and skipped)
        for execution in executions:
            # Skip if already processed (added to results above)
            if execution in executions_to_process:
                continue

            results.append(
                ExecutionAnalyticsResult(
                    agent_id=execution.graph_id,
                    version_id=execution.graph_version,
                    user_id=execution.user_id,
                    exec_id=execution.id,
                    summary_text=(
                        execution.stats.activity_status if execution.stats else None
                    ),
                    score=(
                        execution.stats.correctness_score if execution.stats else None
                    ),
                    status="skipped",
                    error_message=None,  # Not an error - just already processed
                )
            )

        response = ExecutionAnalyticsResponse(
            total_executions=len(executions),
            processed_executions=len(executions_to_process),
            successful_analytics=successful_count,
            failed_analytics=failed_count,
            skipped_executions=len(executions) - len(executions_to_process),
            results=results,
        )

        logger.info(
            f"Analytics generation completed: {successful_count} successful, {failed_count} failed, "
            f"{response.skipped_executions} skipped"
        )

        return response

    except Exception as e:
        logger.exception(f"Error during execution analytics generation: {e}")
        raise HTTPException(status_code=500, detail=str(e))


async def _process_batch(
    executions, request: ExecutionAnalyticsRequest, db_client
) -> list[ExecutionAnalyticsResult]:
    """Process a batch of executions concurrently."""

    async def process_single_execution(execution) -> ExecutionAnalyticsResult:
        try:
            # Generate activity status and score using the specified model
            # Convert stats to GraphExecutionStats if needed
            if execution.stats:
                if isinstance(execution.stats, GraphExecutionMeta.Stats):
                    stats_for_generation = execution.stats.to_db()
                else:
                    # Already GraphExecutionStats
                    stats_for_generation = execution.stats
            else:
                stats_for_generation = GraphExecutionStats()

            activity_response = await generate_activity_status_for_execution(
                graph_exec_id=execution.id,
                graph_id=execution.graph_id,
                graph_version=execution.graph_version,
                execution_stats=stats_for_generation,
                db_client=db_client,
                user_id=execution.user_id,
                execution_status=execution.status,
                model_name=request.model_name,
                skip_feature_flag=True,  # Admin endpoint bypasses feature flags
                system_prompt=request.system_prompt or DEFAULT_SYSTEM_PROMPT,
                user_prompt=request.user_prompt or DEFAULT_USER_PROMPT,
                skip_existing=request.skip_existing,
            )

            if not activity_response:
                return ExecutionAnalyticsResult(
                    agent_id=execution.graph_id,
                    version_id=execution.graph_version,
                    user_id=execution.user_id,
                    exec_id=execution.id,
                    summary_text=None,
                    score=None,
                    status="skipped",
                    error_message="Activity generation returned None",
                )

            # Update the execution stats
            # Convert GraphExecutionMeta.Stats to GraphExecutionStats for DB compatibility
            if execution.stats:
                if isinstance(execution.stats, GraphExecutionMeta.Stats):
                    updated_stats = execution.stats.to_db()
                else:
                    # Already GraphExecutionStats
                    updated_stats = execution.stats
            else:
                updated_stats = GraphExecutionStats()

            updated_stats.activity_status = activity_response["activity_status"]
            updated_stats.correctness_score = activity_response["correctness_score"]

            # Save to database with correct stats type
            await update_graph_execution_stats(
                graph_exec_id=execution.id, stats=updated_stats
            )

            return ExecutionAnalyticsResult(
                agent_id=execution.graph_id,
                version_id=execution.graph_version,
                user_id=execution.user_id,
                exec_id=execution.id,
                summary_text=activity_response["activity_status"],
                score=activity_response["correctness_score"],
                status="success",
            )

        except Exception as e:
            logger.exception(f"Error processing execution {execution.id}: {e}")
            return ExecutionAnalyticsResult(
                agent_id=execution.graph_id,
                version_id=execution.graph_version,
                user_id=execution.user_id,
                exec_id=execution.id,
                summary_text=None,
                score=None,
                status="failed",
                error_message=str(e),
            )

    # Process all executions in the batch concurrently
    return await asyncio.gather(
        *[process_single_execution(execution) for execution in executions]
    )

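For orientation, a hypothetical call to the endpoint above (illustrative, not part of this commit): the host, port, route prefix, and bearer token are placeholders, while the payload and response field names come from ExecutionAnalyticsRequest and ExecutionAnalyticsResponse as used in this module.

import httpx

resp = httpx.post(
    "http://localhost:8006/execution_analytics",  # placeholder host/port; prefix depends on router mounting
    json={
        "graph_id": "d2e0e62c-placeholder",  # which graph's executions to score
        "batch_size": 10,                    # executions sent to the LLM per batch
        "skip_existing": True,               # leave already-scored executions untouched
        "model_name": "gpt-4o-mini",
    },
    headers={"Authorization": "Bearer <admin-jwt>"},  # placeholder admin credential
    timeout=None,  # batches plus the 2s inter-batch sleep can take a while
)
report = resp.json()
print(report["processed_executions"], report["successful_analytics"], report["failed_analytics"])
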
@router.get(
    "/execution_accuracy_trends",
    response_model=AccuracyTrendsResponse,
    summary="Get Execution Accuracy Trends and Alerts",
)
async def get_execution_accuracy_trends(
    graph_id: str,
    user_id: Optional[str] = None,
    days_back: int = 30,
    drop_threshold: float = 10.0,
    include_historical: bool = False,
    admin_user_id: str = Security(get_user_id),
) -> AccuracyTrendsResponse:
    """
    Get execution accuracy trends with moving averages and alert detection.
    Simple single-query approach.
    """
    logger.info(
        f"Admin user {admin_user_id} requesting accuracy trends for graph {graph_id}"
    )

    try:
        result = await get_accuracy_trends_and_alerts(
            graph_id=graph_id,
            days_back=days_back,
            user_id=user_id,
            drop_threshold=drop_threshold,
            include_historical=include_historical,
        )

        return result

    except Exception as e:
        logger.exception(f"Error getting accuracy trends for graph {graph_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
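The trends endpoint can be exercised the same way (again illustrative; query parameter names match the function signature above, host and token are placeholders).

import httpx

trends = httpx.get(
    "http://localhost:8006/execution_accuracy_trends",
    params={"graph_id": "d2e0e62c-placeholder", "days_back": 30, "drop_threshold": 10.0},
    headers={"Authorization": "Bearer <admin-jwt>"},
).json()
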
@@ -7,9 +7,9 @@ import fastapi
|
||||
import fastapi.responses
|
||||
import prisma.enums
|
||||
|
||||
import backend.server.v2.store.cache as store_cache
|
||||
import backend.server.v2.store.db
|
||||
import backend.server.v2.store.model
|
||||
import backend.api.features.store.cache as store_cache
|
||||
import backend.api.features.store.db as store_db
|
||||
import backend.api.features.store.model as store_model
|
||||
import backend.util.json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -24,7 +24,7 @@ router = fastapi.APIRouter(
|
||||
@router.get(
|
||||
"/listings",
|
||||
summary="Get Admin Listings History",
|
||||
response_model=backend.server.v2.store.model.StoreListingsWithVersionsResponse,
|
||||
response_model=store_model.StoreListingsWithVersionsResponse,
|
||||
)
|
||||
async def get_admin_listings_with_versions(
|
||||
status: typing.Optional[prisma.enums.SubmissionStatus] = None,
|
||||
@@ -48,7 +48,7 @@ async def get_admin_listings_with_versions(
|
||||
StoreListingsWithVersionsResponse with listings and their versions
|
||||
"""
|
||||
try:
|
||||
listings = await backend.server.v2.store.db.get_admin_listings_with_versions(
|
||||
listings = await store_db.get_admin_listings_with_versions(
|
||||
status=status,
|
||||
search_query=search,
|
||||
page=page,
|
||||
@@ -68,11 +68,11 @@ async def get_admin_listings_with_versions(
|
||||
@router.post(
|
||||
"/submissions/{store_listing_version_id}/review",
|
||||
summary="Review Store Submission",
|
||||
response_model=backend.server.v2.store.model.StoreSubmission,
|
||||
response_model=store_model.StoreSubmission,
|
||||
)
|
||||
async def review_submission(
|
||||
store_listing_version_id: str,
|
||||
request: backend.server.v2.store.model.ReviewSubmissionRequest,
|
||||
request: store_model.ReviewSubmissionRequest,
|
||||
user_id: str = fastapi.Security(autogpt_libs.auth.get_user_id),
|
||||
):
|
||||
"""
|
||||
@@ -87,12 +87,10 @@ async def review_submission(
|
||||
StoreSubmission with updated review information
|
||||
"""
|
||||
try:
|
||||
already_approved = (
|
||||
await backend.server.v2.store.db.check_submission_already_approved(
|
||||
store_listing_version_id=store_listing_version_id,
|
||||
)
|
||||
already_approved = await store_db.check_submission_already_approved(
|
||||
store_listing_version_id=store_listing_version_id,
|
||||
)
|
||||
submission = await backend.server.v2.store.db.review_store_submission(
|
||||
submission = await store_db.review_store_submission(
|
||||
store_listing_version_id=store_listing_version_id,
|
||||
is_approved=request.is_approved,
|
||||
external_comments=request.comments,
|
||||
@@ -136,7 +134,7 @@ async def admin_download_agent_file(
|
||||
Raises:
|
||||
HTTPException: If the agent is not found or an unexpected error occurs.
|
||||
"""
|
||||
graph_data = await backend.server.v2.store.db.get_agent_as_admin(
|
||||
graph_data = await store_db.get_agent_as_admin(
|
||||
user_id=user_id,
|
||||
store_listing_version_id=store_listing_version_id,
|
||||
)
|
||||
@@ -6,10 +6,11 @@ from typing import Annotated
|
||||
import fastapi
|
||||
import pydantic
|
||||
from autogpt_libs.auth import get_user_id
|
||||
from autogpt_libs.auth.dependencies import requires_user
|
||||
|
||||
import backend.data.analytics
|
||||
|
||||
router = fastapi.APIRouter()
|
||||
router = fastapi.APIRouter(dependencies=[fastapi.Security(requires_user)])
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
autogpt_platform/backend/backend/api/features/analytics_test.py (new file, 340 lines)
@@ -0,0 +1,340 @@
"""Tests for analytics API endpoints."""
|
||||
|
||||
import json
|
||||
from unittest.mock import AsyncMock, Mock
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
import pytest_mock
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
from .analytics import router as analytics_router
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(analytics_router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup_app_auth(mock_jwt_user):
|
||||
"""Setup auth overrides for all tests in this module."""
|
||||
from autogpt_libs.auth.jwt_utils import get_jwt_payload
|
||||
|
||||
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
|
||||
yield
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# /log_raw_metric endpoint tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def test_log_raw_metric_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
test_user_id: str,
|
||||
) -> None:
|
||||
"""Test successful raw metric logging."""
|
||||
mock_result = Mock(id="metric-123-uuid")
|
||||
mock_log_metric = mocker.patch(
|
||||
"backend.data.analytics.log_raw_metric",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"metric_name": "page_load_time",
|
||||
"metric_value": 2.5,
|
||||
"data_string": "/dashboard",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
|
||||
assert response.status_code == 200, f"Unexpected response: {response.text}"
|
||||
assert response.json() == "metric-123-uuid"
|
||||
|
||||
mock_log_metric.assert_called_once_with(
|
||||
user_id=test_user_id,
|
||||
metric_name="page_load_time",
|
||||
metric_value=2.5,
|
||||
data_string="/dashboard",
|
||||
)
|
||||
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
|
||||
"analytics_log_metric_success",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"metric_value,metric_name,data_string,test_id",
|
||||
[
|
||||
(100, "api_calls_count", "external_api", "integer_value"),
|
||||
(0, "error_count", "no_errors", "zero_value"),
|
||||
(-5.2, "temperature_delta", "cooling", "negative_value"),
|
||||
(1.23456789, "precision_test", "float_precision", "float_precision"),
|
||||
(999999999, "large_number", "max_value", "large_number"),
|
||||
(0.0000001, "tiny_number", "min_value", "tiny_number"),
|
||||
],
|
||||
)
|
||||
def test_log_raw_metric_various_values(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
metric_value: float,
|
||||
metric_name: str,
|
||||
data_string: str,
|
||||
test_id: str,
|
||||
) -> None:
|
||||
"""Test raw metric logging with various metric values."""
|
||||
mock_result = Mock(id=f"metric-{test_id}-uuid")
|
||||
mocker.patch(
|
||||
"backend.data.analytics.log_raw_metric",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"metric_name": metric_name,
|
||||
"metric_value": metric_value,
|
||||
"data_string": data_string,
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
|
||||
assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
|
||||
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(
|
||||
{"metric_id": response.json(), "test_case": test_id},
|
||||
indent=2,
|
||||
sort_keys=True,
|
||||
),
|
||||
f"analytics_metric_{test_id}",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"invalid_data,expected_error",
|
||||
[
|
||||
({}, "Field required"),
|
||||
({"metric_name": "test"}, "Field required"),
|
||||
(
|
||||
{"metric_name": "test", "metric_value": "not_a_number", "data_string": "x"},
|
||||
"Input should be a valid number",
|
||||
),
|
||||
(
|
||||
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
|
||||
"String should have at least 1 character",
|
||||
),
|
||||
(
|
||||
{"metric_name": "test", "metric_value": 1.0, "data_string": ""},
|
||||
"String should have at least 1 character",
|
||||
),
|
||||
],
|
||||
ids=[
|
||||
"empty_request",
|
||||
"missing_metric_value_and_data_string",
|
||||
"invalid_metric_value_type",
|
||||
"empty_metric_name",
|
||||
"empty_data_string",
|
||||
],
|
||||
)
|
||||
def test_log_raw_metric_validation_errors(
|
||||
invalid_data: dict,
|
||||
expected_error: str,
|
||||
) -> None:
|
||||
"""Test validation errors for invalid metric requests."""
|
||||
response = client.post("/log_raw_metric", json=invalid_data)
|
||||
|
||||
assert response.status_code == 422
|
||||
error_detail = response.json()
|
||||
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
|
||||
|
||||
error_text = json.dumps(error_detail)
|
||||
assert (
|
||||
expected_error in error_text
|
||||
), f"Expected '{expected_error}' in error response: {error_text}"
|
||||
|
||||
|
||||
def test_log_raw_metric_service_error(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
test_user_id: str,
|
||||
) -> None:
|
||||
"""Test error handling when analytics service fails."""
|
||||
mocker.patch(
|
||||
"backend.data.analytics.log_raw_metric",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=Exception("Database connection failed"),
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"metric_name": "test_metric",
|
||||
"metric_value": 1.0,
|
||||
"data_string": "test",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_metric", json=request_data)
|
||||
|
||||
assert response.status_code == 500
|
||||
error_detail = response.json()["detail"]
|
||||
assert "Database connection failed" in error_detail["message"]
|
||||
assert "hint" in error_detail
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# /log_raw_analytics endpoint tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def test_log_raw_analytics_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
test_user_id: str,
|
||||
) -> None:
|
||||
"""Test successful raw analytics logging."""
|
||||
mock_result = Mock(id="analytics-789-uuid")
|
||||
mock_log_analytics = mocker.patch(
|
||||
"backend.data.analytics.log_raw_analytics",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"type": "user_action",
|
||||
"data": {
|
||||
"action": "button_click",
|
||||
"button_id": "submit_form",
|
||||
"timestamp": "2023-01-01T00:00:00Z",
|
||||
"metadata": {"form_type": "registration", "fields_filled": 5},
|
||||
},
|
||||
"data_index": "button_click_submit_form",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_analytics", json=request_data)
|
||||
|
||||
assert response.status_code == 200, f"Unexpected response: {response.text}"
|
||||
assert response.json() == "analytics-789-uuid"
|
||||
|
||||
mock_log_analytics.assert_called_once_with(
|
||||
test_user_id,
|
||||
"user_action",
|
||||
request_data["data"],
|
||||
"button_click_submit_form",
|
||||
)
|
||||
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps({"analytics_id": response.json()}, indent=2, sort_keys=True),
|
||||
"analytics_log_analytics_success",
|
||||
)
|
||||
|
||||
|
||||
def test_log_raw_analytics_complex_data(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test raw analytics logging with complex nested data structures."""
|
||||
mock_result = Mock(id="analytics-complex-uuid")
|
||||
mocker.patch(
|
||||
"backend.data.analytics.log_raw_analytics",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_result,
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"type": "agent_execution",
|
||||
"data": {
|
||||
"agent_id": "agent_123",
|
||||
"execution_id": "exec_456",
|
||||
"status": "completed",
|
||||
"duration_ms": 3500,
|
||||
"nodes_executed": 15,
|
||||
"blocks_used": [
|
||||
{"block_id": "llm_block", "count": 3},
|
||||
{"block_id": "http_block", "count": 5},
|
||||
{"block_id": "code_block", "count": 2},
|
||||
],
|
||||
"errors": [],
|
||||
"metadata": {
|
||||
"trigger": "manual",
|
||||
"user_tier": "premium",
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
"data_index": "agent_123_exec_456",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_analytics", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(
|
||||
{"analytics_id": response.json(), "logged_data": request_data["data"]},
|
||||
indent=2,
|
||||
sort_keys=True,
|
||||
),
|
||||
"analytics_log_analytics_complex_data",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"invalid_data,expected_error",
|
||||
[
|
||||
({}, "Field required"),
|
||||
({"type": "test"}, "Field required"),
|
||||
(
|
||||
{"type": "test", "data": "not_a_dict", "data_index": "test"},
|
||||
"Input should be a valid dictionary",
|
||||
),
|
||||
({"type": "test", "data": {"key": "value"}}, "Field required"),
|
||||
],
|
||||
ids=[
|
||||
"empty_request",
|
||||
"missing_data_and_data_index",
|
||||
"invalid_data_type",
|
||||
"missing_data_index",
|
||||
],
|
||||
)
|
||||
def test_log_raw_analytics_validation_errors(
|
||||
invalid_data: dict,
|
||||
expected_error: str,
|
||||
) -> None:
|
||||
"""Test validation errors for invalid analytics requests."""
|
||||
response = client.post("/log_raw_analytics", json=invalid_data)
|
||||
|
||||
assert response.status_code == 422
|
||||
error_detail = response.json()
|
||||
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
|
||||
|
||||
error_text = json.dumps(error_detail)
|
||||
assert (
|
||||
expected_error in error_text
|
||||
), f"Expected '{expected_error}' in error response: {error_text}"
|
||||
|
||||
|
||||
def test_log_raw_analytics_service_error(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
test_user_id: str,
|
||||
) -> None:
|
||||
"""Test error handling when analytics service fails."""
|
||||
mocker.patch(
|
||||
"backend.data.analytics.log_raw_analytics",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=Exception("Analytics DB unreachable"),
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"type": "test_event",
|
||||
"data": {"key": "value"},
|
||||
"data_index": "test_index",
|
||||
}
|
||||
|
||||
response = client.post("/log_raw_analytics", json=request_data)
|
||||
|
||||
assert response.status_code == 500
|
||||
error_detail = response.json()["detail"]
|
||||
assert "Analytics DB unreachable" in error_detail["message"]
|
||||
assert "hint" in error_detail
|
||||
autogpt_platform/backend/backend/api/features/builder/db.py (new file, 689 lines)
@@ -0,0 +1,689 @@
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from difflib import SequenceMatcher
from typing import Sequence

import prisma

import backend.api.features.library.db as library_db
import backend.api.features.library.model as library_model
import backend.api.features.store.db as store_db
import backend.api.features.store.model as store_model
import backend.data.block
from backend.blocks import load_all_blocks
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.util.cache import cached
from backend.util.models import Pagination

from .model import (
    BlockCategoryResponse,
    BlockResponse,
    BlockType,
    CountResponse,
    FilterType,
    Provider,
    ProviderResponse,
    SearchEntry,
)

logger = logging.getLogger(__name__)
llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]

MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0

SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent


@dataclass
class _ScoredItem:
    item: SearchResultItem
    filter_type: FilterType
    score: float
    sort_key: str


@dataclass
class _SearchCacheEntry:
    items: list[SearchResultItem]
    total_items: dict[FilterType, int]


def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]:
    categories: dict[BlockCategory, BlockCategoryResponse] = {}

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't have categories (all should have at least one)
        if not block.categories:
            continue

        # Add block to the categories
        for category in block.categories:
            if category not in categories:
                categories[category] = BlockCategoryResponse(
                    name=category.name.lower(),
                    total_blocks=0,
                    blocks=[],
                )

            categories[category].total_blocks += 1

            # Append if the category has less than the specified number of blocks
            if len(categories[category].blocks) < category_blocks:
                categories[category].blocks.append(block.get_info())

    # Sort categories by name
    return sorted(categories.values(), key=lambda x: x.name)


def get_blocks(
    *,
    category: str | None = None,
    type: BlockType | None = None,
    provider: ProviderName | None = None,
    page: int = 1,
    page_size: int = 50,
) -> BlockResponse:
    """
    Get blocks based on either category, type or provider.
    Providing nothing fetches all block types.
    """
    # Only one of category, type, or provider can be specified
    if (category and type) or (category and provider) or (type and provider):
        raise ValueError("Only one of category, type, or provider can be specified")

    blocks: list[AnyBlockSchema] = []
    skip = (page - 1) * page_size
    take = page_size
    total = 0

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't match the category
        if category and category not in {c.name.lower() for c in block.categories}:
            continue
        # Skip blocks that don't match the type
        if (
            (type == "input" and block.block_type.value != "Input")
            or (type == "output" and block.block_type.value != "Output")
            or (type == "action" and block.block_type.value in ("Input", "Output"))
        ):
            continue
        # Skip blocks that don't match the provider
        if provider:
            credentials_info = block.input_schema.get_credentials_fields_info().values()
            if not any(provider in info.provider for info in credentials_info):
                continue

        total += 1
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            blocks.append(block)

    return BlockResponse(
        blocks=[b.get_info() for b in blocks],
        pagination=Pagination(
            total_items=total,
            total_pages=(total + page_size - 1) // page_size,
            current_page=page,
            page_size=page_size,
        ),
    )

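The skip/take counters above implement offset pagination over the lazily filtered block stream without materializing a full list first. A minimal sketch of the same pattern (illustrative only, not part of this commit):

# For page=2, page_size=50: the first 50 matches are consumed by `skip`,
# the next 50 by `take`, and `total` still counts every match.
skip, take, total, kept = 50, 50, 0, []
for item in range(120):  # stand-in for the filtered block stream
    total += 1
    if skip > 0:
        skip -= 1
        continue
    if take > 0:
        take -= 1
        kept.append(item)
assert kept == list(range(50, 100)) and total == 120
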
def get_block_by_id(block_id: str) -> BlockInfo | None:
    """
    Get a specific block by its ID.
    """
    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        if block.id == block_id:
            return block.get_info()
    return None


async def update_search(user_id: str, search: SearchEntry) -> str:
    """
    Upsert a search request for the user and return the search ID.
    """
    if search.search_id:
        # Update existing search
        await prisma.models.BuilderSearchHistory.prisma().update(
            where={
                "id": search.search_id,
            },
            data={
                "searchQuery": search.search_query or "",
                "filter": search.filter or [],  # type: ignore
                "byCreator": search.by_creator or [],
            },
        )
        return search.search_id
    else:
        # Create new search
        new_search = await prisma.models.BuilderSearchHistory.prisma().create(
            data={
                "userId": user_id,
                "searchQuery": search.search_query or "",
                "filter": search.filter or [],  # type: ignore
                "byCreator": search.by_creator or [],
            }
        )
        return new_search.id


async def get_recent_searches(user_id: str, limit: int = 5) -> list[SearchEntry]:
    """
    Get the user's most recent search requests.
    """
    searches = await prisma.models.BuilderSearchHistory.prisma().find_many(
        where={
            "userId": user_id,
        },
        order={
            "updatedAt": "desc",
        },
        take=limit,
    )
    return [
        SearchEntry(
            search_query=s.searchQuery,
            filter=s.filter,  # type: ignore
            by_creator=s.byCreator,
            search_id=s.id,
        )
        for s in searches
    ]


async def get_sorted_search_results(
    *,
    user_id: str,
    search_query: str | None,
    filters: Sequence[FilterType],
    by_creator: Sequence[str] | None = None,
) -> _SearchCacheEntry:
    normalized_filters: tuple[FilterType, ...] = tuple(sorted(set(filters or [])))
    normalized_creators: tuple[str, ...] = tuple(sorted(set(by_creator or [])))
    return await _build_cached_search_results(
        user_id=user_id,
        search_query=search_query or "",
        filters=normalized_filters,
        by_creator=normalized_creators,
    )

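Note why this wrapper normalizes its arguments before delegating: the cached function's key is derived from the call arguments, so equivalent filter sets must collapse to one hashable, order-insensitive key. A toy illustration (not part of this commit):

key_a = tuple(sorted(set(("integrations", "blocks"))))
key_b = tuple(sorted(set(("blocks", "integrations", "blocks"))))
assert key_a == key_b == ("blocks", "integrations")
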
@cached(ttl_seconds=300, shared_cache=True)
async def _build_cached_search_results(
    user_id: str,
    search_query: str,
    filters: tuple[FilterType, ...],
    by_creator: tuple[str, ...],
) -> _SearchCacheEntry:
    normalized_query = (search_query or "").strip().lower()

    include_blocks = "blocks" in filters
    include_integrations = "integrations" in filters
    include_library_agents = "my_agents" in filters
    include_marketplace_agents = "marketplace_agents" in filters

    scored_items: list[_ScoredItem] = []
    total_items: dict[FilterType, int] = {
        "blocks": 0,
        "integrations": 0,
        "marketplace_agents": 0,
        "my_agents": 0,
    }

    block_results, block_total, integration_total = _collect_block_results(
        normalized_query=normalized_query,
        include_blocks=include_blocks,
        include_integrations=include_integrations,
    )
    scored_items.extend(block_results)
    total_items["blocks"] = block_total
    total_items["integrations"] = integration_total

    if include_library_agents:
        library_response = await library_db.list_library_agents(
            user_id=user_id,
            search_term=search_query or None,
            page=1,
            page_size=MAX_LIBRARY_AGENT_RESULTS,
        )
        total_items["my_agents"] = library_response.pagination.total_items
        scored_items.extend(
            _build_library_items(
                agents=library_response.agents,
                normalized_query=normalized_query,
            )
        )

    if include_marketplace_agents:
        marketplace_response = await store_db.get_store_agents(
            creators=list(by_creator) or None,
            search_query=search_query or None,
            page=1,
            page_size=MAX_MARKETPLACE_AGENT_RESULTS,
        )
        total_items["marketplace_agents"] = marketplace_response.pagination.total_items
        scored_items.extend(
            _build_marketplace_items(
                agents=marketplace_response.agents,
                normalized_query=normalized_query,
            )
        )

    sorted_items = sorted(
        scored_items,
        key=lambda entry: (-entry.score, entry.sort_key, entry.filter_type),
    )

    return _SearchCacheEntry(
        items=[entry.item for entry in sorted_items],
        total_items=total_items,
    )


def _collect_block_results(
    *,
    normalized_query: str,
    include_blocks: bool,
    include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
    results: list[_ScoredItem] = []
    block_count = 0
    integration_count = 0

    if not include_blocks and not include_integrations:
        return results, block_count, integration_count

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        if block.disabled:
            continue

        block_info = block.get_info()
        credentials = list(block.input_schema.get_credentials_fields().values())
        is_integration = len(credentials) > 0

        if is_integration and not include_integrations:
            continue
        if not is_integration and not include_blocks:
            continue

        score = _score_block(block, block_info, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        filter_type: FilterType = "integrations" if is_integration else "blocks"
        if is_integration:
            integration_count += 1
        else:
            block_count += 1

        results.append(
            _ScoredItem(
                item=block_info,
                filter_type=filter_type,
                score=score,
                sort_key=_get_item_name(block_info),
            )
        )

    return results, block_count, integration_count


def _build_library_items(
    *,
    agents: list[library_model.LibraryAgent],
    normalized_query: str,
) -> list[_ScoredItem]:
    results: list[_ScoredItem] = []

    for agent in agents:
        score = _score_library_agent(agent, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        results.append(
            _ScoredItem(
                item=agent,
                filter_type="my_agents",
                score=score,
                sort_key=_get_item_name(agent),
            )
        )

    return results


def _build_marketplace_items(
    *,
    agents: list[store_model.StoreAgent],
    normalized_query: str,
) -> list[_ScoredItem]:
    results: list[_ScoredItem] = []

    for agent in agents:
        score = _score_store_agent(agent, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        results.append(
            _ScoredItem(
                item=agent,
                filter_type="marketplace_agents",
                score=score,
                sort_key=_get_item_name(agent),
            )
        )

    return results


def get_providers(
    query: str = "",
    page: int = 1,
    page_size: int = 50,
) -> ProviderResponse:
    providers = []
    query = query.lower()

    skip = (page - 1) * page_size
    take = page_size

    all_providers = _get_all_providers()

    for provider in all_providers.values():
        if (
            query not in provider.name.value.lower()
            and query not in provider.description.lower()
        ):
            continue
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            providers.append(provider)

    total = len(all_providers)

    return ProviderResponse(
        providers=providers,
        pagination=Pagination(
            total_items=total,
            total_pages=(total + page_size - 1) // page_size,
            current_page=page,
            page_size=page_size,
        ),
    )


async def get_counts(user_id: str) -> CountResponse:
    my_agents = await prisma.models.LibraryAgent.prisma().count(
        where={
            "userId": user_id,
            "isDeleted": False,
            "isArchived": False,
        }
    )
    counts = await _get_static_counts()
    return CountResponse(
        my_agents=my_agents,
        **counts,
    )


@cached(ttl_seconds=3600)
async def _get_static_counts():
    """
    Get counts of blocks, integrations, and marketplace agents.
    This is cached to avoid unnecessary database queries and calculations.
    """
    all_blocks = 0
    input_blocks = 0
    action_blocks = 0
    output_blocks = 0
    integrations = 0

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        if block.disabled:
            continue

        all_blocks += 1

        if block.block_type.value == "Input":
            input_blocks += 1
        elif block.block_type.value == "Output":
            output_blocks += 1
        else:
            action_blocks += 1

        credentials = list(block.input_schema.get_credentials_fields().values())
        if len(credentials) > 0:
            integrations += 1

    marketplace_agents = await prisma.models.StoreAgent.prisma().count()

    return {
        "all_blocks": all_blocks,
        "input_blocks": input_blocks,
        "action_blocks": action_blocks,
        "output_blocks": output_blocks,
        "integrations": integrations,
        "marketplace_agents": marketplace_agents,
    }


def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    for field in schema_cls.model_fields.values():
        if field.annotation == LlmModel:
            # Check if query matches any value in llm_models
            if any(query in name for name in llm_models):
                return True
    return False


def _score_block(
    block: AnyBlockSchema,
    block_info: BlockInfo,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = block_info.name.lower()
    description = block_info.description.lower()
    score = _score_primary_fields(name, description, normalized_query)

    category_text = " ".join(
        category.get("category", "").lower() for category in block_info.categories
    )
    score += _score_additional_field(category_text, normalized_query, 12, 6)

    credentials_info = block.input_schema.get_credentials_fields_info().values()
    provider_names = [
        provider.value.lower()
        for info in credentials_info
        for provider in info.provider
    ]
    provider_text = " ".join(provider_names)
    score += _score_additional_field(provider_text, normalized_query, 15, 6)

    if _matches_llm_model(block.input_schema, normalized_query):
        score += 20

    return score


def _score_library_agent(
    agent: library_model.LibraryAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.name.lower()
    description = (agent.description or "").lower()
    instructions = (agent.instructions or "").lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(instructions, normalized_query, 15, 6)
    score += _score_additional_field(
        agent.creator_name.lower(), normalized_query, 10, 5
    )

    return score


def _score_store_agent(
    agent: store_model.StoreAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.agent_name.lower()
    description = agent.description.lower()
    sub_heading = agent.sub_heading.lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(sub_heading, normalized_query, 12, 6)
    score += _score_additional_field(agent.creator.lower(), normalized_query, 10, 5)

    return score


def _score_primary_fields(name: str, description: str, query: str) -> float:
    score = 0.0
    if name == query:
        score += 120
    elif name.startswith(query):
        score += 90
    elif query in name:
        score += 60

    score += SequenceMatcher(None, name, query).ratio() * 50
    if description:
        if query in description:
            score += 30
        score += SequenceMatcher(None, description, query).ratio() * 25
    return score


def _score_additional_field(
    value: str,
    query: str,
    contains_weight: float,
    similarity_weight: float,
) -> float:
    if not value or not query:
        return 0.0

    score = 0.0
    if query in value:
        score += contains_weight
    score += SequenceMatcher(None, value, query).ratio() * similarity_weight
    return score


def _should_include_item(score: float, normalized_query: str) -> bool:
    if not normalized_query:
        return True
    return score >= MIN_SCORE_FOR_FILTERED_RESULTS

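To make the weights concrete, here is the name component of _score_primary_fields worked by hand (illustrative only; the description terms add nothing when the description is empty):

from difflib import SequenceMatcher

name, query = "http request", "http"
score = 90.0  # name.startswith(query) but name != query
score += SequenceMatcher(None, name, query).ratio() * 50  # 2*4/(12+4) = 0.5 -> +25.0
assert score == 115.0
assert score >= 10.0  # clears MIN_SCORE_FOR_FILTERED_RESULTS, so the item is kept
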
def _get_item_name(item: SearchResultItem) -> str:
    if isinstance(item, BlockInfo):
        return item.name.lower()
    if isinstance(item, library_model.LibraryAgent):
        return item.name.lower()
    return item.agent_name.lower()


@cached(ttl_seconds=3600)
def _get_all_providers() -> dict[ProviderName, Provider]:
    providers: dict[ProviderName, Provider] = {}

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        if block.disabled:
            continue

        credentials_info = block.input_schema.get_credentials_fields_info().values()
        for info in credentials_info:
            for provider in info.provider:  # provider is a ProviderName enum member
                if provider in providers:
                    providers[provider].integration_count += 1
                else:
                    providers[provider] = Provider(
                        name=provider, description="", integration_count=1
                    )
    return providers


@cached(ttl_seconds=3600)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    suggested_blocks = []
    # Sum the number of executions for each block type
    # Prisma cannot group by nested relations, so we do a raw query
    # Calculate the cutoff timestamp
    timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)

    results = await query_raw_with_schema(
        """
        SELECT
            agent_node."agentBlockId" AS block_id,
            COUNT(execution.id) AS execution_count
        FROM {schema_prefix}"AgentNodeExecution" execution
        JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
        WHERE execution."endedTime" >= $1::timestamp
        GROUP BY agent_node."agentBlockId"
        ORDER BY execution_count DESC;
        """,
        timestamp_threshold,
    )

    # Get the top blocks based on execution count
    # But ignore Input and Output blocks
    blocks: list[tuple[BlockInfo, int]] = []

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        if block.disabled or block.block_type in (
            backend.data.block.BlockType.INPUT,
            backend.data.block.BlockType.OUTPUT,
            backend.data.block.BlockType.AGENT,
        ):
            continue
        # Find the execution count for this block
        execution_count = next(
            (row["execution_count"] for row in results if row["block_id"] == block.id),
            0,
        )
        blocks.append((block.get_info(), execution_count))
    # Sort blocks by execution count
    blocks.sort(key=lambda x: x[1], reverse=True)

    suggested_blocks = [block[0] for block in blocks]

    # Return the top blocks
    return suggested_blocks[:count]
@@ -2,8 +2,8 @@ from typing import Literal
 
 from pydantic import BaseModel
 
-import backend.server.v2.library.model as library_model
-import backend.server.v2.store.model as store_model
+import backend.api.features.library.model as library_model
+import backend.api.features.store.model as store_model
 from backend.data.block import BlockInfo
 from backend.integrations.providers import ProviderName
 from backend.util.models import Pagination
@@ -18,10 +18,17 @@ FilterType = Literal[
 BlockType = Literal["all", "input", "action", "output"]
 
 
+class SearchEntry(BaseModel):
+    search_query: str | None = None
+    filter: list[FilterType] | None = None
+    by_creator: list[str] | None = None
+    search_id: str | None = None
+
+
 # Suggestions
 class SuggestionsResponse(BaseModel):
     otto_suggestions: list[str]
-    recent_searches: list[str]
+    recent_searches: list[SearchEntry]
     providers: list[ProviderName]
     top_blocks: list[BlockInfo]
@@ -32,7 +39,7 @@ class BlockCategoryResponse(BaseModel):
     total_blocks: int
     blocks: list[BlockInfo]
 
-    model_config = {"use_enum_values": False}  # <== use enum names like "AI"
+    model_config = {"use_enum_values": False}  # Use enum names like "AI"
 
 
 # Input/Action/Output and see all for block categories
@@ -53,17 +60,11 @@ class ProviderResponse(BaseModel):
     pagination: Pagination
 
 
-class SearchBlocksResponse(BaseModel):
-    blocks: BlockResponse
-    total_block_count: int
-    total_integration_count: int
-
-
 class SearchResponse(BaseModel):
     items: list[BlockInfo | library_model.LibraryAgent | store_model.StoreAgent]
+    search_id: str
     total_items: dict[FilterType, int]
-    page: int
-    more_pages: bool
+    pagination: Pagination
 
 
 class CountResponse(BaseModel):
@@ -4,15 +4,12 @@ from typing import Annotated, Sequence
 import fastapi
 from autogpt_libs.auth.dependencies import get_user_id, requires_user
 
-import backend.server.v2.builder.db as builder_db
-import backend.server.v2.builder.model as builder_model
-import backend.server.v2.library.db as library_db
-import backend.server.v2.library.model as library_model
-import backend.server.v2.store.db as store_db
-import backend.server.v2.store.model as store_model
 from backend.integrations.providers import ProviderName
 from backend.util.models import Pagination
 
+from . import db as builder_db
+from . import model as builder_model
+
 logger = logging.getLogger(__name__)
 
 router = fastapi.APIRouter(
@@ -45,7 +42,9 @@ def sanitize_query(query: str | None) -> str | None:
     summary="Get Builder suggestions",
     response_model=builder_model.SuggestionsResponse,
 )
-async def get_suggestions() -> builder_model.SuggestionsResponse:
+async def get_suggestions(
+    user_id: Annotated[str, fastapi.Security(get_user_id)],
+) -> builder_model.SuggestionsResponse:
     """
     Get all suggestions for the Blocks Menu.
     """
@@ -55,11 +54,7 @@ async def get_suggestions() -> builder_model.SuggestionsResponse:
             "Help me create a list",
             "Help me feed my data to Google Maps",
         ],
-        recent_searches=[
-            "image generation",
-            "deepfake",
-            "competitor analysis",
-        ],
+        recent_searches=await builder_db.get_recent_searches(user_id),
         providers=[
            ProviderName.TWITTER,
            ProviderName.GITHUB,
@@ -147,7 +142,6 @@ async def get_providers(
     )
 
 
-# Not using post method because on frontend, orval doesn't support Infinite Query with POST method.
 @router.get(
     "/search",
     summary="Builder search",
@@ -157,7 +151,7 @@ async def get_providers(
 async def search(
     user_id: Annotated[str, fastapi.Security(get_user_id)],
     search_query: Annotated[str | None, fastapi.Query()] = None,
-    filter: Annotated[list[str] | None, fastapi.Query()] = None,
+    filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
    search_id: Annotated[str | None, fastapi.Query()] = None,
    by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
    page: Annotated[int, fastapi.Query()] = 1,
@@ -176,69 +170,43 @@ async def search(
     ]
     search_query = sanitize_query(search_query)
 
-    # Blocks&Integrations
-    blocks = builder_model.SearchBlocksResponse(
-        blocks=builder_model.BlockResponse(
-            blocks=[],
-            pagination=Pagination.empty(),
-        ),
-        total_block_count=0,
-        total_integration_count=0,
+    # Get all possible results
+    cached_results = await builder_db.get_sorted_search_results(
+        user_id=user_id,
+        search_query=search_query,
+        filters=filter,
+        by_creator=by_creator,
     )
-    if "blocks" in filter or "integrations" in filter:
-        blocks = builder_db.search_blocks(
-            include_blocks="blocks" in filter,
-            include_integrations="integrations" in filter,
-            query=search_query or "",
-            page=page,
-            page_size=page_size,
-        )
 
-    # Library Agents
-    my_agents = library_model.LibraryAgentResponse(
-        agents=[],
-        pagination=Pagination.empty(),
+    # Paginate results
+    total_combined_items = len(cached_results.items)
+    pagination = Pagination(
+        total_items=total_combined_items,
+        total_pages=(total_combined_items + page_size - 1) // page_size,
+        current_page=page,
+        page_size=page_size,
    )
-    if "my_agents" in filter:
-        my_agents = await library_db.list_library_agents(
-            user_id=user_id,
-            search_term=search_query,
-            page=page,
-            page_size=page_size,
-        )
 
-    # Marketplace Agents
-    marketplace_agents = store_model.StoreAgentsResponse(
-        agents=[],
-        pagination=Pagination.empty(),
-    )
-    if "marketplace_agents" in filter:
-        marketplace_agents = await store_db.get_store_agents(
-            creators=by_creator,
+    start_idx = (page - 1) * page_size
+    end_idx = start_idx + page_size
+    paginated_items = cached_results.items[start_idx:end_idx]
+
+    # Update the search entry by id
+    search_id = await builder_db.update_search(
+        user_id,
+        builder_model.SearchEntry(
             search_query=search_query,
-            page=page,
-            page_size=page_size,
-        )
-
-    more_pages = False
-    if (
-        blocks.blocks.pagination.current_page < blocks.blocks.pagination.total_pages
-        or my_agents.pagination.current_page < my_agents.pagination.total_pages
-        or marketplace_agents.pagination.current_page
-        < marketplace_agents.pagination.total_pages
-    ):
-        more_pages = True
+            filter=filter,
+            by_creator=by_creator,
+            search_id=search_id,
+        ),
+    )
 
     return builder_model.SearchResponse(
-        items=blocks.blocks.blocks + my_agents.agents + marketplace_agents.agents,
-        total_items={
-            "blocks": blocks.total_block_count,
-            "integrations": blocks.total_integration_count,
-            "marketplace_agents": marketplace_agents.pagination.total_items,
-            "my_agents": my_agents.pagination.total_items,
-        },
-        page=page,
-        more_pages=more_pages,
+        items=paginated_items,
+        search_id=search_id,
+        total_items=cached_results.total_items,
+        pagination=pagination,
    )

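The replacement flow above builds and caches one fully sorted result list per (user, query, filters) combination, then serves each page as a plain slice of that list, which is why the old per-source more_pages bookkeeping disappears. Schematically (illustrative only):

items = list(range(95))  # stand-in for cached_results.items
page, page_size = 2, 50
start_idx = (page - 1) * page_size
assert items[start_idx : start_idx + page_size] == list(range(50, 95))
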
autogpt_platform/backend/backend/api/features/chat/config.py (new file, 118 lines)
@@ -0,0 +1,118 @@
"""Configuration management for chat system."""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import Field, field_validator
|
||||
from pydantic_settings import BaseSettings
|
||||
|
||||
|
||||
class ChatConfig(BaseSettings):
|
||||
"""Configuration for the chat system."""
|
||||
|
||||
# OpenAI API Configuration
|
||||
model: str = Field(
|
||||
default="qwen/qwen3-235b-a22b-2507", description="Default model to use"
|
||||
)
|
||||
api_key: str | None = Field(default=None, description="OpenAI API key")
|
||||
base_url: str | None = Field(
|
||||
default="https://openrouter.ai/api/v1",
|
||||
description="Base URL for API (e.g., for OpenRouter)",
|
||||
)
|
||||
|
||||
# Session TTL Configuration - 12 hours
|
||||
session_ttl: int = Field(default=43200, description="Session TTL in seconds")
|
||||
|
||||
# System Prompt Configuration
|
||||
system_prompt_path: str = Field(
|
||||
default="prompts/chat_system.md",
|
||||
description="Path to system prompt file relative to chat module",
|
||||
)
|
||||
|
||||
# Streaming Configuration
|
||||
max_context_messages: int = Field(
|
||||
default=50, ge=1, le=200, description="Maximum context messages"
|
||||
)
|
||||
|
||||
stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
|
||||
max_retries: int = Field(default=3, description="Maximum number of retries")
|
||||
max_agent_runs: int = Field(default=3, description="Maximum number of agent runs")
|
||||
max_agent_schedules: int = Field(
|
||||
default=3, description="Maximum number of agent schedules"
|
||||
)
|
||||
|
||||
@field_validator("api_key", mode="before")
|
||||
@classmethod
|
||||
def get_api_key(cls, v):
|
||||
"""Get API key from environment if not provided."""
|
||||
if v is None:
|
||||
# Try to get from environment variables
|
||||
# First check for CHAT_API_KEY (Pydantic prefix)
|
||||
v = os.getenv("CHAT_API_KEY")
|
||||
if not v:
|
||||
# Fall back to OPEN_ROUTER_API_KEY
|
||||
v = os.getenv("OPEN_ROUTER_API_KEY")
|
||||
if not v:
|
||||
# Fall back to OPENAI_API_KEY
|
||||
v = os.getenv("OPENAI_API_KEY")
|
||||
return v
|
||||
|
||||
@field_validator("base_url", mode="before")
|
||||
@classmethod
|
||||
def get_base_url(cls, v):
|
||||
"""Get base URL from environment if not provided."""
|
||||
if v is None:
|
||||
# Check for OpenRouter or custom base URL
|
||||
v = os.getenv("CHAT_BASE_URL")
|
||||
if not v:
|
||||
v = os.getenv("OPENROUTER_BASE_URL")
|
||||
if not v:
|
||||
v = os.getenv("OPENAI_BASE_URL")
|
||||
if not v:
|
||||
v = "https://openrouter.ai/api/v1"
|
||||
return v
|
||||
|
||||
def get_system_prompt(self, **template_vars) -> str:
|
||||
"""Load and render the system prompt from file.
|
||||
|
||||
Args:
|
||||
**template_vars: Variables to substitute in the template
|
||||
|
||||
Returns:
|
||||
Rendered system prompt string
|
||||
|
||||
"""
|
||||
# Get the path relative to this module
|
||||
module_dir = Path(__file__).parent
|
||||
prompt_path = module_dir / self.system_prompt_path
|
||||
|
||||
# Check for .j2 extension first (Jinja2 template)
|
||||
j2_path = Path(str(prompt_path) + ".j2")
|
||||
if j2_path.exists():
|
||||
try:
|
||||
from jinja2 import Template
|
||||
|
||||
template = Template(j2_path.read_text())
|
||||
return template.render(**template_vars)
|
||||
except ImportError:
|
||||
# Jinja2 not installed, fall back to reading as plain text
|
||||
return j2_path.read_text()
|
||||
|
||||
# Check for markdown file
|
||||
if prompt_path.exists():
|
||||
content = prompt_path.read_text()
|
||||
|
||||
# Simple variable substitution if Jinja2 is not available
|
||||
for key, value in template_vars.items():
|
||||
placeholder = f"{{{key}}}"
|
||||
content = content.replace(placeholder, str(value))
|
||||
|
||||
return content
|
||||
raise FileNotFoundError(f"System prompt file not found: {prompt_path}")
|
||||
|
||||
class Config:
|
||||
"""Pydantic config."""
|
||||
|
||||
env_file = ".env"
|
||||
env_file_encoding = "utf-8"
|
||||
extra = "ignore" # Ignore extra environment variables
|
||||
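For orientation, a minimal sketch of the plain-text branch of `get_system_prompt()` above. The temp file, its contents, and the `user_name` variable are all made up for illustration; the single-brace `{user_name}` placeholder is the form that branch substitutes:

```python
from pathlib import Path
from tempfile import TemporaryDirectory

from backend.api.features.chat.config import ChatConfig

with TemporaryDirectory() as tmp:
    # Hypothetical prompt file for illustration only
    prompt_file = Path(tmp) / "prompt.md"
    prompt_file.write_text("Hello {user_name}, welcome to AutoGPT.")

    # Joining an absolute path onto the module dir with pathlib yields the
    # absolute path, so the sketch can point at the temp file directly
    config = ChatConfig(system_prompt_path=str(prompt_file))
    print(config.get_system_prompt(user_name="Ada"))
    # -> Hello Ada, welcome to AutoGPT.
```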
204
autogpt_platform/backend/backend/api/features/chat/model.py
Normal file
@@ -0,0 +1,204 @@
import logging
import uuid
from datetime import UTC, datetime

from openai.types.chat import (
    ChatCompletionAssistantMessageParam,
    ChatCompletionDeveloperMessageParam,
    ChatCompletionFunctionMessageParam,
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionToolMessageParam,
    ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_assistant_message_param import FunctionCall
from openai.types.chat.chat_completion_message_tool_call_param import (
    ChatCompletionMessageToolCallParam,
    Function,
)
from pydantic import BaseModel

from backend.data.redis_client import get_redis_async
from backend.util.exceptions import RedisError

from .config import ChatConfig

logger = logging.getLogger(__name__)
config = ChatConfig()


class ChatMessage(BaseModel):
    role: str
    content: str | None = None
    name: str | None = None
    tool_call_id: str | None = None
    refusal: str | None = None
    tool_calls: list[dict] | None = None
    function_call: dict | None = None


class Usage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatSession(BaseModel):
    session_id: str
    user_id: str | None
    messages: list[ChatMessage]
    usage: list[Usage]
    credentials: dict[str, dict] = {}  # Map of provider -> credential metadata
    started_at: datetime
    updated_at: datetime
    successful_agent_runs: dict[str, int] = {}
    successful_agent_schedules: dict[str, int] = {}

    @staticmethod
    def new(user_id: str | None) -> "ChatSession":
        return ChatSession(
            session_id=str(uuid.uuid4()),
            user_id=user_id,
            messages=[],
            usage=[],
            credentials={},
            started_at=datetime.now(UTC),
            updated_at=datetime.now(UTC),
        )

    def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
        messages = []
        for message in self.messages:
            if message.role == "developer":
                m = ChatCompletionDeveloperMessageParam(
                    role="developer",
                    content=message.content or "",
                )
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "system":
                m = ChatCompletionSystemMessageParam(
                    role="system",
                    content=message.content or "",
                )
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "user":
                m = ChatCompletionUserMessageParam(
                    role="user",
                    content=message.content or "",
                )
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "assistant":
                m = ChatCompletionAssistantMessageParam(
                    role="assistant",
                    content=message.content or "",
                )
                if message.function_call:
                    m["function_call"] = FunctionCall(
                        arguments=message.function_call["arguments"],
                        name=message.function_call["name"],
                    )
                if message.refusal:
                    m["refusal"] = message.refusal
                if message.tool_calls:
                    t: list[ChatCompletionMessageToolCallParam] = []
                    for tool_call in message.tool_calls:
                        # Tool calls are stored with nested structure: {id, type, function: {name, arguments}}
                        function_data = tool_call.get("function", {})

                        # Skip tool calls that are missing required fields
                        if "id" not in tool_call or "name" not in function_data:
                            logger.warning(
                                f"Skipping invalid tool call: missing required fields. "
                                f"Got: {tool_call.keys()}, function keys: {function_data.keys()}"
                            )
                            continue

                        # Arguments are stored as a JSON string
                        arguments_str = function_data.get("arguments", "{}")

                        t.append(
                            ChatCompletionMessageToolCallParam(
                                id=tool_call["id"],
                                type="function",
                                function=Function(
                                    arguments=arguments_str,
                                    name=function_data["name"],
                                ),
                            )
                        )
                    m["tool_calls"] = t
                if message.name:
                    m["name"] = message.name
                messages.append(m)
            elif message.role == "tool":
                messages.append(
                    ChatCompletionToolMessageParam(
                        role="tool",
                        content=message.content or "",
                        tool_call_id=message.tool_call_id or "",
                    )
                )
            elif message.role == "function":
                messages.append(
                    ChatCompletionFunctionMessageParam(
                        role="function",
                        content=message.content,
                        name=message.name or "",
                    )
                )
        return messages


async def get_chat_session(
    session_id: str,
    user_id: str | None,
) -> ChatSession | None:
    """Get a chat session by ID."""
    redis_key = f"chat:session:{session_id}"
    async_redis = await get_redis_async()

    raw_session: bytes | None = await async_redis.get(redis_key)

    if raw_session is None:
        logger.warning(f"Session {session_id} not found in Redis")
        return None

    try:
        session = ChatSession.model_validate_json(raw_session)
    except Exception as e:
        logger.error(f"Failed to deserialize session {session_id}: {e}", exc_info=True)
        raise RedisError(f"Corrupted session data for {session_id}") from e

    if session.user_id is not None and session.user_id != user_id:
        logger.warning(
            f"Session {session_id} user id mismatch: {session.user_id} != {user_id}"
        )
        return None

    return session


async def upsert_chat_session(
    session: ChatSession,
) -> ChatSession:
    """Update a chat session with the given messages."""

    redis_key = f"chat:session:{session.session_id}"

    async_redis = await get_redis_async()
    resp = await async_redis.setex(
        redis_key, config.session_ttl, session.model_dump_json()
    )

    if not resp:
        raise RedisError(
            f"Failed to persist chat session {session.session_id} to Redis: {resp}"
        )

    return session
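As a quick check on the conversion logic above, a minimal sketch (session contents are made up) of how a stored tool call round-trips through `to_openai_messages()`:

```python
from backend.api.features.chat.model import ChatMessage, ChatSession

session = ChatSession.new(user_id=None)
session.messages = [
    ChatMessage(
        role="assistant",
        content="",
        # Stored in the nested {id, type, function: {name, arguments}} shape
        tool_calls=[
            {
                "id": "call_1",
                "type": "function",
                "function": {"name": "find_agent", "arguments": '{"query": "demo"}'},
            }
        ],
    ),
    ChatMessage(role="tool", content="result", tool_call_id="call_1"),
]

openai_messages = session.to_openai_messages()
# The TypedDict message params behave as plain dicts at runtime
assert openai_messages[0]["tool_calls"][0]["function"]["name"] == "find_agent"
assert openai_messages[1]["tool_call_id"] == "call_1"
```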
@@ -0,0 +1,70 @@
import pytest

from .model import (
    ChatMessage,
    ChatSession,
    Usage,
    get_chat_session,
    upsert_chat_session,
)

messages = [
    ChatMessage(content="Hello, how are you?", role="user"),
    ChatMessage(
        content="I'm fine, thank you!",
        role="assistant",
        tool_calls=[
            {
                "id": "t123",
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "arguments": '{"city": "New York"}',
                },
            }
        ],
    ),
    ChatMessage(
        content="I'm using the tool to get the weather",
        role="tool",
        tool_call_id="t123",
    ),
]


@pytest.mark.asyncio(loop_scope="session")
async def test_chatsession_serialization_deserialization():
    s = ChatSession.new(user_id="abc123")
    s.messages = messages
    s.usage = [Usage(prompt_tokens=100, completion_tokens=200, total_tokens=300)]
    serialized = s.model_dump_json()
    s2 = ChatSession.model_validate_json(serialized)
    assert s2.model_dump() == s.model_dump()


@pytest.mark.asyncio(loop_scope="session")
async def test_chatsession_redis_storage():

    s = ChatSession.new(user_id=None)
    s.messages = messages

    s = await upsert_chat_session(s)

    s2 = await get_chat_session(
        session_id=s.session_id,
        user_id=s.user_id,
    )

    assert s2 == s


@pytest.mark.asyncio(loop_scope="session")
async def test_chatsession_redis_storage_user_id_mismatch():

    s = ChatSession.new(user_id="abc123")
    s.messages = messages
    s = await upsert_chat_session(s)

    s2 = await get_chat_session(s.session_id, None)

    assert s2 is None
@@ -0,0 +1,104 @@
You are Otto, an AI Co-Pilot and Forward Deployed Engineer for AutoGPT, an AI Business Automation tool. Your mission is to help users quickly find and set up AutoGPT agents to solve their business problems.

Here are the functions available to you:

<functions>
1. **find_agent** - Search for agents that solve the user's problem
2. **run_agent** - Run or schedule an agent (automatically handles setup)
</functions>

## HOW run_agent WORKS

The `run_agent` tool automatically handles the entire setup flow:

1. **First call** (no inputs) → Returns available inputs so user can decide what values to use
2. **Credentials check** → If missing, UI automatically prompts user to add them (you don't need to mention this)
3. **Execution** → Runs when you provide `inputs` OR set `use_defaults=true`

Parameters:
- `username_agent_slug` (required): Agent identifier like "creator/agent-name"
- `inputs`: Object with input values for the agent
- `use_defaults`: Set to `true` to run with default values (only after user confirms)
- `schedule_name` + `cron`: For scheduled execution

## WORKFLOW

1. **find_agent** - Search for agents that solve the user's problem
2. **run_agent** (first call, no inputs) - Get available inputs for the agent
3. **Ask user** what values they want to use OR if they want to use defaults
4. **run_agent** (second call) - Either with `inputs={...}` or `use_defaults=true`

## YOUR APPROACH

**Step 1: Understand the Problem**
- Ask maximum 1-2 targeted questions
- Focus on: What business problem are they solving?
- Move quickly to searching for solutions

**Step 2: Find Agents**
- Use `find_agent` immediately with relevant keywords
- Suggest the best option from search results
- Explain briefly how it solves their problem

**Step 3: Get Agent Inputs**
- Call `run_agent(username_agent_slug="creator/agent-name")` without inputs
- This returns the available inputs (required and optional)
- Present these to the user and ask what values they want

**Step 4: Run with User's Choice**
- If user provides values: `run_agent(username_agent_slug="...", inputs={...})`
- If user says "use defaults": `run_agent(username_agent_slug="...", use_defaults=true)`
- On success, share the agent link with the user

**For Scheduled Execution:**
- Add `schedule_name` and `cron` parameters
- Example: `run_agent(username_agent_slug="...", inputs={...}, schedule_name="Daily Report", cron="0 9 * * *")`

## FUNCTION CALL FORMAT

To call a function, use this exact format:
`<function_call>function_name(parameter="value")</function_call>`

Examples:
- `<function_call>find_agent(query="social media automation")</function_call>`
- `<function_call>run_agent(username_agent_slug="creator/agent-name")</function_call>` (get inputs)
- `<function_call>run_agent(username_agent_slug="creator/agent-name", inputs={"topic": "AI news"})</function_call>`
- `<function_call>run_agent(username_agent_slug="creator/agent-name", use_defaults=true)</function_call>`

## KEY RULES

**What You DON'T Do:**
- Don't help with login (frontend handles this)
- Don't mention or explain credentials to the user (frontend handles this automatically)
- Don't run agents without first showing available inputs to the user
- Don't use `use_defaults=true` without user explicitly confirming
- Don't write responses longer than 3 sentences

**What You DO:**
- Always call run_agent first without inputs to see what's available
- Ask user what values they want OR if they want to use defaults
- Keep all responses to maximum 3 sentences
- Include the agent link in your response after successful execution

**Error Handling:**
- Authentication needed → "Please sign in via the interface"
- Credentials missing → The UI handles this automatically. Focus on asking the user about input values instead.

## RESPONSE STRUCTURE

Before responding, wrap your analysis in <thinking> tags to systematically plan your approach:
- Extract the key business problem or request from the user's message
- Determine what function call (if any) you need to make next
- Plan your response to stay under the 3-sentence maximum

Example interaction:
```
User: "Run the AI news agent for me"
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news")</function_call>
[Tool returns: Agent accepts inputs - Required: topic. Optional: num_articles (default: 5)]
Otto: The AI News agent needs a topic. What topic would you like news about, or should I use the defaults?
User: "Use defaults"
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news", use_defaults=true)</function_call>
```

KEEP ANSWERS TO 3 SENTENCES
@@ -0,0 +1,101 @@
from enum import Enum
from typing import Any

from pydantic import BaseModel, Field


class ResponseType(str, Enum):
    """Types of streaming responses."""

    TEXT_CHUNK = "text_chunk"
    TEXT_ENDED = "text_ended"
    TOOL_CALL = "tool_call"
    TOOL_CALL_START = "tool_call_start"
    TOOL_RESPONSE = "tool_response"
    ERROR = "error"
    USAGE = "usage"
    STREAM_END = "stream_end"


class StreamBaseResponse(BaseModel):
    """Base response model for all streaming responses."""

    type: ResponseType
    timestamp: str | None = None

    def to_sse(self) -> str:
        """Convert to SSE format."""
        return f"data: {self.model_dump_json()}\n\n"


class StreamTextChunk(StreamBaseResponse):
    """Streaming text content from the assistant."""

    type: ResponseType = ResponseType.TEXT_CHUNK
    content: str = Field(..., description="Text content chunk")


class StreamToolCallStart(StreamBaseResponse):
    """Tool call started notification."""

    type: ResponseType = ResponseType.TOOL_CALL_START
    tool_name: str = Field(..., description="Name of the tool being called")
    tool_id: str = Field(..., description="Unique tool call ID")


class StreamToolCall(StreamBaseResponse):
    """Tool invocation notification."""

    type: ResponseType = ResponseType.TOOL_CALL
    tool_id: str = Field(..., description="Unique tool call ID")
    tool_name: str = Field(..., description="Name of the tool being called")
    arguments: dict[str, Any] = Field(
        default_factory=dict, description="Tool arguments"
    )


class StreamToolExecutionResult(StreamBaseResponse):
    """Tool execution result."""

    type: ResponseType = ResponseType.TOOL_RESPONSE
    tool_id: str = Field(..., description="Tool call ID this responds to")
    tool_name: str = Field(..., description="Name of the tool that was executed")
    result: str | dict[str, Any] = Field(..., description="Tool execution result")
    success: bool = Field(
        default=True, description="Whether the tool execution succeeded"
    )


class StreamUsage(StreamBaseResponse):
    """Token usage statistics."""

    type: ResponseType = ResponseType.USAGE
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class StreamError(StreamBaseResponse):
    """Error response."""

    type: ResponseType = ResponseType.ERROR
    message: str = Field(..., description="Error message")
    code: str | None = Field(default=None, description="Error code")
    details: dict[str, Any] | None = Field(
        default=None, description="Additional error details"
    )


class StreamTextEnded(StreamBaseResponse):
    """Text streaming completed marker."""

    type: ResponseType = ResponseType.TEXT_ENDED


class StreamEnd(StreamBaseResponse):
    """End of stream marker."""

    type: ResponseType = ResponseType.STREAM_END
    summary: dict[str, Any] | None = Field(
        default=None, description="Stream summary statistics"
    )
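A minimal sketch of what `to_sse()` emits for one of the models above (the timestamp is a made-up value):

```python
import json

from backend.api.features.chat.response_model import ResponseType, StreamTextChunk

chunk = StreamTextChunk(content="Hello", timestamp="2025-01-01T00:00:00+00:00")
sse = chunk.to_sse()

# One JSON event per `data:` field, terminated by a blank line
assert sse.startswith("data: ") and sse.endswith("\n\n")
payload = json.loads(sse[len("data: "):])
assert payload["type"] == ResponseType.TEXT_CHUNK.value
assert payload["content"] == "Hello"
```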
219
autogpt_platform/backend/backend/api/features/chat/routes.py
Normal file
@@ -0,0 +1,219 @@
"""Chat API routes for chat session management and streaming via SSE."""

import logging
from collections.abc import AsyncGenerator
from typing import Annotated

from autogpt_libs import auth
from fastapi import APIRouter, Depends, Query, Security
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from backend.util.exceptions import NotFoundError

from . import service as chat_service
from .config import ChatConfig

config = ChatConfig()


logger = logging.getLogger(__name__)

router = APIRouter(
    tags=["chat"],
)

# ========== Request/Response Models ==========


class CreateSessionResponse(BaseModel):
    """Response model containing information on a newly created chat session."""

    id: str
    created_at: str
    user_id: str | None


class SessionDetailResponse(BaseModel):
    """Response model providing complete details for a chat session, including messages."""

    id: str
    created_at: str
    updated_at: str
    user_id: str | None
    messages: list[dict]


# ========== Routes ==========


@router.post(
    "/sessions",
)
async def create_session(
    user_id: Annotated[str | None, Depends(auth.get_user_id)],
) -> CreateSessionResponse:
    """
    Create a new chat session.

    Initiates a new chat session for either an authenticated or anonymous user.

    Args:
        user_id: The optional authenticated user ID parsed from the JWT. If missing, creates an anonymous session.

    Returns:
        CreateSessionResponse: Details of the created session.

    """
    logger.info(
        f"Creating session with user_id: "
        f"...{user_id[-8:] if user_id and len(user_id) > 8 else '<redacted>'}"
    )

    session = await chat_service.create_chat_session(user_id)

    return CreateSessionResponse(
        id=session.session_id,
        created_at=session.started_at.isoformat(),
        user_id=session.user_id or None,
    )


@router.get(
    "/sessions/{session_id}",
)
async def get_session(
    session_id: str,
    user_id: Annotated[str | None, Depends(auth.get_user_id)],
) -> SessionDetailResponse:
    """
    Retrieve the details of a specific chat session.

    Looks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.

    Args:
        session_id: The unique identifier for the desired chat session.
        user_id: The optional authenticated user ID, or None for anonymous access.

    Returns:
        SessionDetailResponse: Details for the requested session; raises NotFoundError if not found.

    """
    session = await chat_service.get_session(session_id, user_id)
    if not session:
        raise NotFoundError(f"Session {session_id} not found")
    return SessionDetailResponse(
        id=session.session_id,
        created_at=session.started_at.isoformat(),
        updated_at=session.updated_at.isoformat(),
        user_id=session.user_id or None,
        messages=[message.model_dump() for message in session.messages],
    )


@router.get(
    "/sessions/{session_id}/stream",
)
async def stream_chat(
    session_id: str,
    message: Annotated[str, Query(min_length=1, max_length=10000)],
    user_id: str | None = Depends(auth.get_user_id),
    is_user_message: bool = Query(default=True),
):
    """
    Stream chat responses for a session.

    Streams the AI/completion responses in real time over Server-Sent Events (SSE), including:
    - Text fragments as they are generated
    - Tool call UI elements (if invoked)
    - Tool execution results

    Args:
        session_id: The chat session identifier to associate with the streamed messages.
        message: The user's new message to process.
        user_id: Optional authenticated user ID.
        is_user_message: Whether the message is a user message.

    Returns:
        StreamingResponse: SSE-formatted response chunks.

    """
    # Validate session exists before starting the stream
    # This prevents errors after the response has already started
    session = await chat_service.get_session(session_id, user_id)

    if not session:
        raise NotFoundError(f"Session {session_id} not found")
    if session.user_id is None and user_id is not None:
        session = await chat_service.assign_user_to_session(session_id, user_id)

    async def event_generator() -> AsyncGenerator[str, None]:
        async for chunk in chat_service.stream_chat_completion(
            session_id,
            message,
            is_user_message=is_user_message,
            user_id=user_id,
            session=session,  # Pass pre-fetched session to avoid double-fetch
        ):
            yield chunk.to_sse()

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # Disable nginx buffering
        },
    )


@router.patch(
    "/sessions/{session_id}/assign-user",
    dependencies=[Security(auth.requires_user)],
    status_code=200,
)
async def session_assign_user(
    session_id: str,
    user_id: Annotated[str, Security(auth.get_user_id)],
) -> dict:
    """
    Assign an authenticated user to a chat session.

    Used (typically post-login) to claim an existing anonymous session as the current authenticated user.

    Args:
        session_id: The identifier for the (previously anonymous) session.
        user_id: The authenticated user's ID to associate with the session.

    Returns:
        dict: Status of the assignment.

    """
    await chat_service.assign_user_to_session(session_id, user_id)
    return {"status": "ok"}


# ========== Health Check ==========


@router.get("/health", status_code=200)
async def health_check() -> dict:
    """
    Health check endpoint for the chat service.

    Performs a full cycle test of session creation, assignment, and retrieval. Should always return healthy
    if the service and data layer are operational.

    Returns:
        dict: A status dictionary indicating health, service name, and API version.

    """
    session = await chat_service.create_chat_session(None)
    await chat_service.assign_user_to_session(session.session_id, "test_user")
    await chat_service.get_session(session.session_id, "test_user")

    return {
        "status": "healthy",
        "service": "chat",
        "version": "0.1.0",
    }
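To tie the routes together, a hedged client-side sketch of the create-then-stream flow. It assumes `httpx` is installed, the router is mounted under `/api/chat`, and the backend is listening on `localhost:8000`; none of those are confirmed by this diff:

```python
import asyncio

import httpx


async def main() -> None:
    base = "http://localhost:8000/api/chat"  # hypothetical mount point
    async with httpx.AsyncClient(timeout=None) as http:
        # POST /sessions creates an anonymous session when no JWT is sent
        session = (await http.post(f"{base}/sessions")).json()

        # GET /sessions/{id}/stream yields SSE events for the message
        async with http.stream(
            "GET",
            f"{base}/sessions/{session['id']}/stream",
            params={"message": "Hello, how are you?"},
        ) as response:
            async for line in response.aiter_lines():
                if line.startswith("data: "):
                    print(line[len("data: "):])  # one JSON event per line


asyncio.run(main())
```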
538
autogpt_platform/backend/backend/api/features/chat/service.py
Normal file
@@ -0,0 +1,538 @@
import logging
from collections.abc import AsyncGenerator
from datetime import UTC, datetime
from typing import Any

import orjson
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam

from backend.util.exceptions import NotFoundError

from .config import ChatConfig
from .model import (
    ChatMessage,
    ChatSession,
    Usage,
    get_chat_session,
    upsert_chat_session,
)
from .response_model import (
    StreamBaseResponse,
    StreamEnd,
    StreamError,
    StreamTextChunk,
    StreamTextEnded,
    StreamToolCall,
    StreamToolCallStart,
    StreamToolExecutionResult,
    StreamUsage,
)
from .tools import execute_tool, tools

logger = logging.getLogger(__name__)

config = ChatConfig()
client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)


async def create_chat_session(
    user_id: str | None = None,
) -> ChatSession:
    """
    Create a new chat session and persist it to the database.
    """
    session = ChatSession.new(user_id)
    # Persist the session immediately so it can be used for streaming
    return await upsert_chat_session(session)


async def get_session(
    session_id: str,
    user_id: str | None = None,
) -> ChatSession | None:
    """
    Get a chat session by ID.
    """
    return await get_chat_session(session_id, user_id)


async def assign_user_to_session(
    session_id: str,
    user_id: str,
) -> ChatSession:
    """
    Assign a user to a chat session.
    """
    session = await get_chat_session(session_id, None)
    if not session:
        raise NotFoundError(f"Session {session_id} not found")
    session.user_id = user_id
    return await upsert_chat_session(session)


async def stream_chat_completion(
    session_id: str,
    message: str | None = None,
    is_user_message: bool = True,
    user_id: str | None = None,
    retry_count: int = 0,
    session: ChatSession | None = None,
) -> AsyncGenerator[StreamBaseResponse, None]:
    """Main entry point for streaming chat completions with database handling.

    This function handles all database operations and delegates streaming
    to the internal _stream_chat_chunks function.

    Args:
        session_id: Chat session ID
        message: The new message to append before streaming, if any
        is_user_message: Whether the message is a user message (else assistant)
        user_id: User ID for authentication (None for anonymous)
        retry_count: Internal retry counter for retryable streaming errors
        session: Optional pre-loaded session object (for recursive calls to avoid Redis refetch)

    Yields:
        StreamBaseResponse objects formatted as SSE

    Raises:
        NotFoundError: If session_id is invalid
        ValueError: If max_context_messages is exceeded

    """
    logger.info(
        f"Streaming chat completion for session {session_id} for message {message} and user id {user_id}. Message is user message: {is_user_message}"
    )

    # Only fetch from Redis if session not provided (initial call)
    if session is None:
        session = await get_chat_session(session_id, user_id)
        logger.info(
            f"Fetched session from Redis: {session.session_id if session else 'None'}, "
            f"message_count={len(session.messages) if session else 0}"
        )
    else:
        logger.info(
            f"Using provided session object: {session.session_id}, "
            f"message_count={len(session.messages)}"
        )

    if not session:
        raise NotFoundError(
            f"Session {session_id} not found. Please create a new session first."
        )

    if message:
        session.messages.append(
            ChatMessage(
                role="user" if is_user_message else "assistant", content=message
            )
        )
        logger.info(
            f"Appended message (role={'user' if is_user_message else 'assistant'}), "
            f"new message_count={len(session.messages)}"
        )

    if len(session.messages) > config.max_context_messages:
        raise ValueError(f"Max messages exceeded: {config.max_context_messages}")

    logger.info(
        f"Upserting session: {session.session_id} with user id {session.user_id}, "
        f"message_count={len(session.messages)}"
    )
    session = await upsert_chat_session(session)
    assert session, "Session not found"

    assistant_response = ChatMessage(
        role="assistant",
        content="",
    )

    has_yielded_end = False
    has_yielded_error = False
    has_done_tool_call = False
    has_received_text = False
    text_streaming_ended = False
    tool_response_messages: list[ChatMessage] = []
    accumulated_tool_calls: list[dict[str, Any]] = []
    should_retry = False

    try:
        async for chunk in _stream_chat_chunks(
            session=session,
            tools=tools,
        ):
            if isinstance(chunk, StreamTextChunk):
                content = chunk.content or ""
                assert assistant_response.content is not None
                assistant_response.content += content
                has_received_text = True
                yield chunk
            elif isinstance(chunk, StreamToolCallStart):
                # Emit text_ended before first tool call, but only if we've received text
                if has_received_text and not text_streaming_ended:
                    yield StreamTextEnded()
                    text_streaming_ended = True
                yield chunk
            elif isinstance(chunk, StreamToolCall):
                # Accumulate tool calls in OpenAI format
                accumulated_tool_calls.append(
                    {
                        "id": chunk.tool_id,
                        "type": "function",
                        "function": {
                            "name": chunk.tool_name,
                            "arguments": orjson.dumps(chunk.arguments).decode("utf-8"),
                        },
                    }
                )
            elif isinstance(chunk, StreamToolExecutionResult):
                result_content = (
                    chunk.result
                    if isinstance(chunk.result, str)
                    else orjson.dumps(chunk.result).decode("utf-8")
                )
                tool_response_messages.append(
                    ChatMessage(
                        role="tool",
                        content=result_content,
                        tool_call_id=chunk.tool_id,
                    )
                )
                has_done_tool_call = True
                # Track if any tool execution failed
                if not chunk.success:
                    logger.warning(
                        f"Tool {chunk.tool_name} (ID: {chunk.tool_id}) execution failed"
                    )
                yield chunk
            elif isinstance(chunk, StreamEnd):
                if not has_done_tool_call:
                    has_yielded_end = True
                    yield chunk
            elif isinstance(chunk, StreamError):
                has_yielded_error = True
            elif isinstance(chunk, StreamUsage):
                session.usage.append(
                    Usage(
                        prompt_tokens=chunk.prompt_tokens,
                        completion_tokens=chunk.completion_tokens,
                        total_tokens=chunk.total_tokens,
                    )
                )
            else:
                logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
    except Exception as e:
        logger.error(f"Error during stream: {e!s}", exc_info=True)

        # Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.)
        is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError))

        if is_retryable and retry_count < config.max_retries:
            logger.info(
                f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
            )
            should_retry = True
        else:
            # Non-retryable error or max retries exceeded
            # Save any partial progress before reporting error
            messages_to_save: list[ChatMessage] = []

            # Add assistant message if it has content or tool calls
            if accumulated_tool_calls:
                assistant_response.tool_calls = accumulated_tool_calls
            if assistant_response.content or assistant_response.tool_calls:
                messages_to_save.append(assistant_response)

            # Add tool response messages after assistant message
            messages_to_save.extend(tool_response_messages)

            session.messages.extend(messages_to_save)
            await upsert_chat_session(session)

            if not has_yielded_error:
                error_message = str(e)
                if not is_retryable:
                    error_message = f"Non-retryable error: {error_message}"
                elif retry_count >= config.max_retries:
                    error_message = (
                        f"Max retries ({config.max_retries}) exceeded: {error_message}"
                    )

                error_response = StreamError(
                    message=error_message,
                    timestamp=datetime.now(UTC).isoformat(),
                )
                yield error_response
            if not has_yielded_end:
                yield StreamEnd(
                    timestamp=datetime.now(UTC).isoformat(),
                )
            return

    # Handle retry outside of exception handler to avoid nesting
    if should_retry and retry_count < config.max_retries:
        logger.info(
            f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}"
        )
        async for chunk in stream_chat_completion(
            session_id=session.session_id,
            user_id=user_id,
            retry_count=retry_count + 1,
            session=session,
        ):
            yield chunk
        return  # Exit after retry to avoid double-saving in finally block

    # Normal completion path - save session and handle tool call continuation
    logger.info(
        f"Normal completion path: session={session.session_id}, "
        f"current message_count={len(session.messages)}"
    )

    # Build the messages list in the correct order
    messages_to_save: list[ChatMessage] = []

    # Add assistant message with tool_calls if any
    if accumulated_tool_calls:
        assistant_response.tool_calls = accumulated_tool_calls
        logger.info(
            f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
        )
    if assistant_response.content or assistant_response.tool_calls:
        messages_to_save.append(assistant_response)
        logger.info(
            f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}"
        )

    # Add tool response messages after assistant message
    messages_to_save.extend(tool_response_messages)
    logger.info(
        f"Saving {len(tool_response_messages)} tool response messages, "
        f"total_to_save={len(messages_to_save)}"
    )

    session.messages.extend(messages_to_save)
    logger.info(f"Extended session messages, new message_count={len(session.messages)}")
    await upsert_chat_session(session)

    # If we did a tool call, stream the chat completion again to get the next response
    if has_done_tool_call:
        logger.info(
            "Tool call executed, streaming chat completion again to get assistant response"
        )
        async for chunk in stream_chat_completion(
            session_id=session.session_id,
            user_id=user_id,
            session=session,  # Pass session object to avoid Redis refetch
        ):
            yield chunk


async def _stream_chat_chunks(
    session: ChatSession,
    tools: list[ChatCompletionToolParam],
) -> AsyncGenerator[StreamBaseResponse, None]:
    """
    Pure streaming function for OpenAI chat completions with tool calling.

    This function is database-agnostic and focuses only on streaming logic.

    Args:
        session: Chat session providing the conversation context
        tools: Tool definitions to expose to the model

    Yields:
        StreamBaseResponse objects for each streamed event

    """
    model = config.model

    logger.info("Starting pure chat stream")

    # Loop to handle tool calls and continue conversation
    while True:
        try:
            logger.info("Creating OpenAI chat completion stream...")

            # Create the stream with proper types
            stream = await client.chat.completions.create(
                model=model,
                messages=session.to_openai_messages(),
                tools=tools,
                tool_choice="auto",
                stream=True,
            )

            # Variables to accumulate tool calls
            tool_calls: list[dict[str, Any]] = []
            active_tool_call_idx: int | None = None
            finish_reason: str | None = None
            # Track which tool call indices have had their start event emitted
            emitted_start_for_idx: set[int] = set()

            # Process the stream
            chunk: ChatCompletionChunk
            async for chunk in stream:
                if chunk.usage:
                    yield StreamUsage(
                        prompt_tokens=chunk.usage.prompt_tokens,
                        completion_tokens=chunk.usage.completion_tokens,
                        total_tokens=chunk.usage.total_tokens,
                    )

                if chunk.choices:
                    choice = chunk.choices[0]
                    delta = choice.delta

                    # Capture finish reason
                    if choice.finish_reason:
                        finish_reason = choice.finish_reason
                        logger.info(f"Finish reason: {finish_reason}")

                    # Handle content streaming
                    if delta.content:
                        # Stream the text chunk
                        text_response = StreamTextChunk(
                            content=delta.content,
                            timestamp=datetime.now(UTC).isoformat(),
                        )
                        yield text_response

                    # Handle tool calls
                    if delta.tool_calls:
                        for tc_chunk in delta.tool_calls:
                            idx = tc_chunk.index

                            # Update active tool call index if needed
                            if (
                                active_tool_call_idx is None
                                or active_tool_call_idx != idx
                            ):
                                active_tool_call_idx = idx

                            # Ensure we have a tool call object at this index
                            while len(tool_calls) <= idx:
                                tool_calls.append(
                                    {
                                        "id": "",
                                        "type": "function",
                                        "function": {
                                            "name": "",
                                            "arguments": "",
                                        },
                                    },
                                )

                            # Accumulate the tool call data
                            if tc_chunk.id:
                                tool_calls[idx]["id"] = tc_chunk.id
                            if tc_chunk.function:
                                if tc_chunk.function.name:
                                    tool_calls[idx]["function"][
                                        "name"
                                    ] = tc_chunk.function.name
                                if tc_chunk.function.arguments:
                                    tool_calls[idx]["function"][
                                        "arguments"
                                    ] += tc_chunk.function.arguments

                            # Emit StreamToolCallStart only after we have the tool call ID
                            if (
                                idx not in emitted_start_for_idx
                                and tool_calls[idx]["id"]
                                and tool_calls[idx]["function"]["name"]
                            ):
                                yield StreamToolCallStart(
                                    tool_id=tool_calls[idx]["id"],
                                    tool_name=tool_calls[idx]["function"]["name"],
                                    timestamp=datetime.now(UTC).isoformat(),
                                )
                                emitted_start_for_idx.add(idx)
            logger.info(f"Stream complete. Finish reason: {finish_reason}")

            # Yield all accumulated tool calls after the stream is complete
            # This ensures all tool call arguments have been fully received
            for idx, tool_call in enumerate(tool_calls):
                try:
                    async for tc in _yield_tool_call(tool_calls, idx, session):
                        yield tc
                except (orjson.JSONDecodeError, KeyError, TypeError) as e:
                    logger.error(
                        f"Failed to parse tool call {idx}: {e}",
                        exc_info=True,
                        extra={"tool_call": tool_call},
                    )
                    yield StreamError(
                        message=f"Invalid tool call arguments for tool {tool_call.get('function', {}).get('name', 'unknown')}: {e}",
                        timestamp=datetime.now(UTC).isoformat(),
                    )
                    # Re-raise to trigger retry logic in the parent function
                    raise

            yield StreamEnd(
                timestamp=datetime.now(UTC).isoformat(),
            )
            return
        except Exception as e:
            logger.error(f"Error in stream: {e!s}", exc_info=True)
            error_response = StreamError(
                message=str(e),
                timestamp=datetime.now(UTC).isoformat(),
            )
            yield error_response
            yield StreamEnd(
                timestamp=datetime.now(UTC).isoformat(),
            )
            return


async def _yield_tool_call(
    tool_calls: list[dict[str, Any]],
    yield_idx: int,
    session: ChatSession,
) -> AsyncGenerator[StreamBaseResponse, None]:
    """
    Yield a tool call and its execution result.

    Raises:
        orjson.JSONDecodeError: If tool call arguments cannot be parsed as JSON
        KeyError: If expected tool call fields are missing
        TypeError: If tool call structure is invalid
    """
    logger.info(f"Yielding tool call: {tool_calls[yield_idx]}")

    # Parse tool call arguments - exceptions will propagate to caller
    arguments = orjson.loads(tool_calls[yield_idx]["function"]["arguments"])

    yield StreamToolCall(
        tool_id=tool_calls[yield_idx]["id"],
        tool_name=tool_calls[yield_idx]["function"]["name"],
        arguments=arguments,
        timestamp=datetime.now(UTC).isoformat(),
    )

    tool_execution_response: StreamToolExecutionResult = await execute_tool(
        tool_name=tool_calls[yield_idx]["function"]["name"],
        parameters=arguments,
        tool_call_id=tool_calls[yield_idx]["id"],
        user_id=session.user_id,
        session=session,
    )
    logger.info(f"Yielding Tool execution response: {tool_execution_response}")
    yield tool_execution_response


if __name__ == "__main__":
    import asyncio

    async def main():
        session = await create_chat_session()
        async for chunk in stream_chat_completion(
            session.session_id,
            "Please find me an agent that can help me with my business. Call the tool twice once with the query 'money printing agent' and once with the query 'money generating agent'",
            user_id=session.user_id,
        ):
            print(chunk)

    asyncio.run(main())
@@ -0,0 +1,81 @@
import logging
from os import getenv

import pytest

from . import service as chat_service
from .response_model import (
    StreamEnd,
    StreamError,
    StreamTextChunk,
    StreamToolExecutionResult,
)

logger = logging.getLogger(__name__)


@pytest.mark.asyncio(loop_scope="session")
async def test_stream_chat_completion():
    """
    Test the stream_chat_completion function.
    """
    api_key: str | None = getenv("OPEN_ROUTER_API_KEY")
    if not api_key:
        return pytest.skip("OPEN_ROUTER_API_KEY is not set, skipping test")

    session = await chat_service.create_chat_session()

    has_errors = False
    has_ended = False
    assistant_message = ""
    async for chunk in chat_service.stream_chat_completion(
        session.session_id, "Hello, how are you?", user_id=session.user_id
    ):
        logger.info(chunk)
        if isinstance(chunk, StreamError):
            has_errors = True
        if isinstance(chunk, StreamTextChunk):
            assistant_message += chunk.content
        if isinstance(chunk, StreamEnd):
            has_ended = True

    assert has_ended, "Chat completion did not end"
    assert not has_errors, "Error occurred while streaming chat completion"
    assert assistant_message, "Assistant message is empty"


@pytest.mark.asyncio(loop_scope="session")
async def test_stream_chat_completion_with_tool_calls():
    """
    Test the stream_chat_completion function with tool calls.
    """
    api_key: str | None = getenv("OPEN_ROUTER_API_KEY")
    if not api_key:
        return pytest.skip("OPEN_ROUTER_API_KEY is not set, skipping test")

    session = await chat_service.create_chat_session()
    session = await chat_service.upsert_chat_session(session)

    has_errors = False
    has_ended = False
    had_tool_calls = False
    async for chunk in chat_service.stream_chat_completion(
        session.session_id,
        "Please find me an agent that can help me with my business. Use the query 'money printing agent'",
        user_id=session.user_id,
    ):
        logger.info(chunk)
        if isinstance(chunk, StreamError):
            has_errors = True

        if isinstance(chunk, StreamEnd):
            has_ended = True
        if isinstance(chunk, StreamToolExecutionResult):
            had_tool_calls = True

    assert has_ended, "Chat completion did not end"
    assert not has_errors, "Error occurred while streaming chat completion"
    assert had_tool_calls, "Tool calls did not occur"
    session = await chat_service.get_session(session.session_id)
    assert session, "Session not found"
    assert session.usage, "Usage is empty"
@@ -0,0 +1,41 @@
from typing import TYPE_CHECKING, Any

from openai.types.chat import ChatCompletionToolParam

from backend.api.features.chat.model import ChatSession

from .base import BaseTool
from .find_agent import FindAgentTool
from .run_agent import RunAgentTool

if TYPE_CHECKING:
    from backend.api.features.chat.response_model import StreamToolExecutionResult

# Initialize tool instances
find_agent_tool = FindAgentTool()
run_agent_tool = RunAgentTool()

# Export tools as OpenAI format
tools: list[ChatCompletionToolParam] = [
    find_agent_tool.as_openai_tool(),
    run_agent_tool.as_openai_tool(),
]


async def execute_tool(
    tool_name: str,
    parameters: dict[str, Any],
    user_id: str | None,
    session: ChatSession,
    tool_call_id: str,
) -> "StreamToolExecutionResult":
    """Dispatch a tool call to the matching tool implementation."""
    tool_map: dict[str, BaseTool] = {
        "find_agent": find_agent_tool,
        "run_agent": run_agent_tool,
    }
    if tool_name not in tool_map:
        raise ValueError(f"Tool {tool_name} not found")
    return await tool_map[tool_name].execute(
        user_id, session, tool_call_id, **parameters
    )
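A hedged sketch of calling the dispatcher above directly. The query string is made up, and actually executing `find_agent` presumably needs the backing services (database, marketplace data) to be running, so treat this as an illustration rather than a standalone script:

```python
import asyncio

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools import execute_tool


async def main() -> None:
    session = ChatSession.new(user_id=None)
    result = await execute_tool(
        tool_name="find_agent",  # anything other than find_agent/run_agent raises ValueError
        parameters={"query": "social media automation"},  # made-up query
        user_id=session.user_id,
        session=session,
        tool_call_id="call_1",
    )
    print(result.tool_name, result.success)


asyncio.run(main())
```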
@@ -0,0 +1,464 @@
import uuid
from datetime import UTC, datetime
from os import getenv

import pytest
from pydantic import SecretStr

from backend.api.features.chat.model import ChatSession
from backend.api.features.store import db as store_db
from backend.blocks.firecrawl.scrape import FirecrawlScrapeBlock
from backend.blocks.io import AgentInputBlock, AgentOutputBlock
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.db import prisma
from backend.data.graph import Graph, Link, Node, create_graph
from backend.data.model import APIKeyCredentials
from backend.data.user import get_or_create_user
from backend.integrations.credentials_store import IntegrationCredentialsStore


def make_session(user_id: str | None = None):
    return ChatSession(
        session_id=str(uuid.uuid4()),
        user_id=user_id,
        messages=[],
        usage=[],
        started_at=datetime.now(UTC),
        updated_at=datetime.now(UTC),
        successful_agent_runs={},
        successful_agent_schedules={},
    )


@pytest.fixture(scope="session")
async def setup_test_data():
    """
    Set up test data for run_agent tests:
    1. Create a test user
    2. Create a test graph (agent input -> agent output)
    3. Create a store listing and store listing version
    4. Approve the store listing version
    """
    # 1. Create a test user
    user_data = {
        "sub": f"test-user-{uuid.uuid4()}",
        "email": f"test-{uuid.uuid4()}@example.com",
    }
    user = await get_or_create_user(user_data)

    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile",
            "links": [],  # Required field - empty array for test profiles
        }
    )

    # 2. Create a test graph with agent input -> agent output
    graph_id = str(uuid.uuid4())

    # Create input node
    input_node_id = str(uuid.uuid4())
    input_block = AgentInputBlock()
    input_node = Node(
        id=input_node_id,
        block_id=input_block.id,
        input_default={
            "name": "test_input",
            "title": "Test Input",
            "value": "",
            "advanced": False,
            "description": "Test input field",
            "placeholder_values": [],
        },
        metadata={"position": {"x": 0, "y": 0}},
    )

    # Create output node
    output_node_id = str(uuid.uuid4())
    output_block = AgentOutputBlock()
    output_node = Node(
        id=output_node_id,
        block_id=output_block.id,
        input_default={
            "name": "test_output",
            "title": "Test Output",
            "value": "",
            "format": "",
            "advanced": False,
            "description": "Test output field",
        },
        metadata={"position": {"x": 200, "y": 0}},
    )

    # Create link from input to output
    link = Link(
        source_id=input_node_id,
        sink_id=output_node_id,
        source_name="result",
        sink_name="value",
        is_static=True,
    )

    # Create the graph
    graph = Graph(
        id=graph_id,
        version=1,
        is_active=True,
        name="Test Agent",
        description="A simple test agent for testing",
        nodes=[input_node, output_node],
        links=[link],
    )

    created_graph = await create_graph(graph, user.id)

    # 3. Create a store listing and store listing version for the agent
    # Use unique slug to avoid constraint violations
    unique_slug = f"test-agent-{str(uuid.uuid4())[:8]}"
    store_submission = await store_db.create_store_submission(
        user_id=user.id,
        agent_id=created_graph.id,
        agent_version=created_graph.version,
        slug=unique_slug,
        name="Test Agent",
        description="A simple test agent",
        sub_heading="Test agent for unit tests",
        categories=["testing"],
        image_urls=["https://example.com/image.jpg"],
    )

    assert store_submission.store_listing_version_id is not None
    # 4. Approve the store listing version
    await store_db.review_store_submission(
        store_listing_version_id=store_submission.store_listing_version_id,
        is_approved=True,
        external_comments="Approved for testing",
        internal_comments="Test approval",
        reviewer_id=user.id,
    )

    return {
        "user": user,
        "graph": created_graph,
        "store_submission": store_submission,
    }


@pytest.fixture(scope="session")
async def setup_llm_test_data():
    """
    Set up test data for LLM agent tests:
    1. Create a test user
    2. Create test OpenAI credentials for the user
    3. Create a test graph with input -> LLM block -> output
    4. Create and approve a store listing
    """
    key = getenv("OPENAI_API_KEY")
    if not key:
        return pytest.skip("OPENAI_API_KEY is not set")

    # 1. Create a test user
    user_data = {
        "sub": f"test-user-{uuid.uuid4()}",
        "email": f"test-{uuid.uuid4()}@example.com",
    }
    user = await get_or_create_user(user_data)

    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile for LLM tests",
            "links": [],  # Required field - empty array for test profiles
        }
    )

    # 2. Create test OpenAI credentials for the user
    credentials = APIKeyCredentials(
        id=str(uuid.uuid4()),
        provider="openai",
        api_key=SecretStr("test-openai-api-key"),
        title="Test OpenAI API Key",
        expires_at=None,
    )

    # Store the credentials
    creds_store = IntegrationCredentialsStore()
    await creds_store.add_creds(user.id, credentials)

    # 3. Create a test graph with input -> LLM block -> output
    graph_id = str(uuid.uuid4())

    # Create input node for the prompt
    input_node_id = str(uuid.uuid4())
    input_block = AgentInputBlock()
    input_node = Node(
        id=input_node_id,
        block_id=input_block.id,
        input_default={
            "name": "user_prompt",
            "title": "User Prompt",
            "value": "",
            "advanced": False,
            "description": "Prompt for the LLM",
            "placeholder_values": [],
        },
        metadata={"position": {"x": 0, "y": 0}},
    )

    # Create LLM block node
    llm_node_id = str(uuid.uuid4())
    llm_block = AITextGeneratorBlock()
    llm_node = Node(
        id=llm_node_id,
        block_id=llm_block.id,
        input_default={
            "model": "gpt-4o-mini",
            "sys_prompt": "You are a helpful assistant.",
            "retry": 3,
            "prompt_values": {},
            "credentials": {
                "provider": "openai",
                "id": credentials.id,
                "type": "api_key",
                "title": credentials.title,
            },
        },
        metadata={"position": {"x": 300, "y": 0}},
    )

    # Create output node
    output_node_id = str(uuid.uuid4())
    output_block = AgentOutputBlock()
    output_node = Node(
        id=output_node_id,
        block_id=output_block.id,
        input_default={
            "name": "llm_response",
            "title": "LLM Response",
            "value": "",
            "format": "",
            "advanced": False,
            "description": "Response from the LLM",
        },
        metadata={"position": {"x": 600, "y": 0}},
    )

    # Create links
    # Link input.result -> llm.prompt
    link1 = Link(
        source_id=input_node_id,
        sink_id=llm_node_id,
        source_name="result",
        sink_name="prompt",
        is_static=True,
    )

    # Link llm.response -> output.value
    link2 = Link(
        source_id=llm_node_id,
        sink_id=output_node_id,
        source_name="response",
        sink_name="value",
        is_static=False,
    )

    # Create the graph
    graph = Graph(
        id=graph_id,
        version=1,
        is_active=True,
        name="LLM Test Agent",
        description="An agent that uses an LLM to process text",
        nodes=[input_node, llm_node, output_node],
        links=[link1, link2],
    )

    created_graph = await create_graph(graph, user.id)

    # 4. Create and approve a store listing
    unique_slug = f"llm-test-agent-{str(uuid.uuid4())[:8]}"
    store_submission = await store_db.create_store_submission(
        user_id=user.id,
        agent_id=created_graph.id,
        agent_version=created_graph.version,
        slug=unique_slug,
        name="LLM Test Agent",
        description="An agent with LLM capabilities",
        sub_heading="Test agent with OpenAI integration",
        categories=["testing", "ai"],
        image_urls=["https://example.com/image.jpg"],
    )
    assert store_submission.store_listing_version_id is not None
    await store_db.review_store_submission(
        store_listing_version_id=store_submission.store_listing_version_id,
        is_approved=True,
        external_comments="Approved for testing",
        internal_comments="Test approval for LLM agent",
        reviewer_id=user.id,
    )

    return {
        "user": user,
        "graph": created_graph,
        "credentials": credentials,
        "store_submission": store_submission,
    }


@pytest.fixture(scope="session")
async def setup_firecrawl_test_data():
    """
    Set up test data for Firecrawl agent tests (missing credentials scenario):
    1. Create a test user (WITHOUT Firecrawl credentials)
    2. Create a test graph with input -> Firecrawl block -> output
    3. Create and approve a store listing
    """
    # 1. Create a test user
    user_data = {
        "sub": f"test-user-{uuid.uuid4()}",
        "email": f"test-{uuid.uuid4()}@example.com",
    }
    user = await get_or_create_user(user_data)

    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile for Firecrawl tests",
            "links": [],  # Required field - empty array for test profiles
        }
    )

    # NOTE: We deliberately do NOT create Firecrawl credentials for this user
    # This tests the scenario where required credentials are missing

    # 2. Create a test graph with input -> Firecrawl block -> output
    graph_id = str(uuid.uuid4())

    # Create input node for the URL
    input_node_id = str(uuid.uuid4())
    input_block = AgentInputBlock()
    input_node = Node(
        id=input_node_id,
        block_id=input_block.id,
        input_default={
            "name": "url",
            "title": "URL to Scrape",
            "value": "",
            "advanced": False,
            "description": "URL for Firecrawl to scrape",
            "placeholder_values": [],
        },
        metadata={"position": {"x": 0, "y": 0}},
    )

    # Create Firecrawl block node
    firecrawl_node_id = str(uuid.uuid4())
    firecrawl_block = FirecrawlScrapeBlock()
    firecrawl_node = Node(
        id=firecrawl_node_id,
        block_id=firecrawl_block.id,
        input_default={
            "limit": 10,
            "only_main_content": True,
            "max_age": 3600000,
            "wait_for": 200,
            "formats": ["markdown"],
            "credentials": {
                "provider": "firecrawl",
                "id": "test-firecrawl-id",
                "type": "api_key",
                "title": "Firecrawl API Key",
            },
        },
        metadata={"position": {"x": 300, "y": 0}},
    )

    # Create output node
    output_node_id = str(uuid.uuid4())
    output_block = AgentOutputBlock()
    output_node = Node(
        id=output_node_id,
        block_id=output_block.id,
        input_default={
            "name": "scraped_data",
            "title": "Scraped Data",
            "value": "",
            "format": "",
            "advanced": False,
            "description": "Data scraped by Firecrawl",
        },
        metadata={"position": {"x": 600, "y": 0}},
    )

    # Create links
    # Link input.result -> firecrawl.url
    link1 = Link(
        source_id=input_node_id,
        sink_id=firecrawl_node_id,
        source_name="result",
        sink_name="url",
is_static=True,
|
||||
)
|
||||
|
||||
# Link firecrawl.markdown -> output.value
|
||||
link2 = Link(
|
||||
source_id=firecrawl_node_id,
|
||||
sink_id=output_node_id,
|
||||
source_name="markdown",
|
||||
sink_name="value",
|
||||
is_static=False,
|
||||
)
|
||||
|
||||
# Create the graph
|
||||
graph = Graph(
|
||||
id=graph_id,
|
||||
version=1,
|
||||
is_active=True,
|
||||
name="Firecrawl Test Agent",
|
||||
description="An agent that uses Firecrawl to scrape websites",
|
||||
nodes=[input_node, firecrawl_node, output_node],
|
||||
links=[link1, link2],
|
||||
)
|
||||
|
||||
created_graph = await create_graph(graph, user.id)
|
||||
|
||||
# 3. Create and approve a store listing
|
||||
unique_slug = f"firecrawl-test-agent-{str(uuid.uuid4())[:8]}"
|
||||
store_submission = await store_db.create_store_submission(
|
||||
user_id=user.id,
|
||||
agent_id=created_graph.id,
|
||||
agent_version=created_graph.version,
|
||||
slug=unique_slug,
|
||||
name="Firecrawl Test Agent",
|
||||
description="An agent with Firecrawl integration (no credentials)",
|
||||
sub_heading="Test agent requiring Firecrawl credentials",
|
||||
categories=["testing", "scraping"],
|
||||
image_urls=["https://example.com/image.jpg"],
|
||||
)
|
||||
assert store_submission.store_listing_version_id is not None
|
||||
await store_db.review_store_submission(
|
||||
store_listing_version_id=store_submission.store_listing_version_id,
|
||||
is_approved=True,
|
||||
external_comments="Approved for testing",
|
||||
internal_comments="Test approval for Firecrawl agent",
|
||||
reviewer_id=user.id,
|
||||
)
|
||||
|
||||
return {
|
||||
"user": user,
|
||||
"graph": created_graph,
|
||||
"store_submission": store_submission,
|
||||
}
|
||||
119
autogpt_platform/backend/backend/api/features/chat/tools/base.py
Normal file
@@ -0,0 +1,119 @@
"""Base classes and shared utilities for chat tools."""

import logging
from typing import Any

from openai.types.chat import ChatCompletionToolParam

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.response_model import StreamToolExecutionResult

from .models import ErrorResponse, NeedLoginResponse, ToolResponseBase

logger = logging.getLogger(__name__)


class BaseTool:
    """Base class for all chat tools."""

    @property
    def name(self) -> str:
        """Tool name for OpenAI function calling."""
        raise NotImplementedError

    @property
    def description(self) -> str:
        """Tool description for OpenAI."""
        raise NotImplementedError

    @property
    def parameters(self) -> dict[str, Any]:
        """Tool parameters schema for OpenAI."""
        raise NotImplementedError

    @property
    def requires_auth(self) -> bool:
        """Whether this tool requires authentication."""
        return False

    def as_openai_tool(self) -> ChatCompletionToolParam:
        """Convert to OpenAI tool format."""
        return ChatCompletionToolParam(
            type="function",
            function={
                "name": self.name,
                "description": self.description,
                "parameters": self.parameters,
            },
        )

    async def execute(
        self,
        user_id: str | None,
        session: ChatSession,
        tool_call_id: str,
        **kwargs,
    ) -> StreamToolExecutionResult:
        """Execute the tool with authentication check.

        Args:
            user_id: User ID (may be anonymous like "anon_123")
            session: Chat session
            tool_call_id: ID of the originating tool call
            **kwargs: Tool-specific parameters

        Returns:
            Stream result wrapping a serialized Pydantic response object
        """
        if self.requires_auth and not user_id:
            logger.error(
                f"Attempted tool call for {self.name} but user not authenticated"
            )
            return StreamToolExecutionResult(
                tool_id=tool_call_id,
                tool_name=self.name,
                result=NeedLoginResponse(
                    message=f"Please sign in to use {self.name}",
                    session_id=session.session_id,
                ).model_dump_json(),
                success=False,
            )

        try:
            result = await self._execute(user_id, session, **kwargs)
            return StreamToolExecutionResult(
                tool_id=tool_call_id,
                tool_name=self.name,
                result=result.model_dump_json(),
            )
        except Exception as e:
            logger.error(f"Error in {self.name}: {e}", exc_info=True)
            return StreamToolExecutionResult(
                tool_id=tool_call_id,
                tool_name=self.name,
                result=ErrorResponse(
                    message=f"An error occurred while executing {self.name}",
                    error=str(e),
                    session_id=session.session_id,
                ).model_dump_json(),
                success=False,
            )

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Internal execution logic to be implemented by subclasses.

        Args:
            user_id: User ID (authenticated or anonymous)
            session: Chat session
            **kwargs: Tool-specific parameters

        Returns:
            Pydantic response object
        """
        raise NotImplementedError
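For orientation, here is a minimal sketch of what a concrete subclass of `BaseTool` looks like. The `EchoTool`/`EchoResponse` names and their fields are invented for illustration; only the `BaseTool` contract (`name`, `description`, `parameters`, `_execute`) and `ResponseType.SUCCESS` come from this diff.

```python
# Illustrative only: a minimal concrete tool built on the BaseTool contract above.
from typing import Any

from .base import BaseTool
from .models import ResponseType, ToolResponseBase


class EchoResponse(ToolResponseBase):
    """Hypothetical response model for this sketch."""

    type: ResponseType = ResponseType.SUCCESS
    echoed: str = ""


class EchoTool(BaseTool):
    """Echoes the provided text back; exists only to show the subclass shape."""

    @property
    def name(self) -> str:
        return "echo"

    @property
    def description(self) -> str:
        return "Echo the provided text back to the user."

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema surfaced to OpenAI via as_openai_tool()
        return {
            "type": "object",
            "properties": {"text": {"type": "string"}},
            "required": ["text"],
        }

    async def _execute(self, user_id, session, **kwargs) -> ToolResponseBase:
        # BaseTool.execute() handles auth, error wrapping, and serialization
        return EchoResponse(
            message="Echoed input",
            echoed=kwargs.get("text", ""),
            session_id=session.session_id,
        )
```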
@@ -0,0 +1,129 @@
"""Tool for discovering agents from marketplace and user library."""

import logging
from typing import Any

from backend.api.features.chat.model import ChatSession
from backend.api.features.store import db as store_db
from backend.util.exceptions import DatabaseError, NotFoundError

from .base import BaseTool
from .models import (
    AgentCarouselResponse,
    AgentInfo,
    ErrorResponse,
    NoResultsResponse,
    ToolResponseBase,
)

logger = logging.getLogger(__name__)


class FindAgentTool(BaseTool):
    """Tool for discovering agents based on user needs."""

    @property
    def name(self) -> str:
        return "find_agent"

    @property
    def description(self) -> str:
        return (
            "Discover agents from the marketplace based on capabilities and user needs."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search query describing what the user wants to accomplish. Use single keywords for best results.",
                },
            },
            "required": ["query"],
        }

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Search for agents in the marketplace.

        Args:
            user_id: User ID (may be anonymous)
            session: Chat session
            query: Search query

        Returns:
            AgentCarouselResponse: List of agents found in the marketplace
            NoResultsResponse: No agents found in the marketplace
            ErrorResponse: Error message
        """
        query = kwargs.get("query", "").strip()
        session_id = session.session_id
        if not query:
            return ErrorResponse(
                message="Please provide a search query",
                session_id=session_id,
            )
        agents = []
        try:
            logger.info(f"Searching marketplace for: {query}")
            store_results = await store_db.get_store_agents(
                search_query=query,
                page_size=5,
            )

            logger.info(f"Find agents tool found {len(store_results.agents)} agents")
            for agent in store_results.agents:
                agent_id = f"{agent.creator}/{agent.slug}"
                logger.info(f"Building agent ID = {agent_id}")
                agents.append(
                    AgentInfo(
                        id=agent_id,
                        name=agent.agent_name,
                        description=agent.description or "",
                        source="marketplace",
                        in_library=False,
                        creator=agent.creator,
                        category="general",
                        rating=agent.rating,
                        runs=agent.runs,
                        is_featured=False,
                    ),
                )
        except NotFoundError:
            pass
        except DatabaseError as e:
            logger.error(f"Error searching agents: {e}", exc_info=True)
            return ErrorResponse(
                message="Failed to search for agents. Please try again.",
                error=str(e),
                session_id=session_id,
            )
        if not agents:
            return NoResultsResponse(
                message=(
                    f"No agents found matching '{query}'. Try different keywords or "
                    "browse the marketplace. If 3 consecutive find_agent calls have "
                    "returned no agents, stop trying and ask the user if there is "
                    "anything else you can help with."
                ),
                session_id=session_id,
                suggestions=[
                    "Try more general terms",
                    "Browse categories in the marketplace",
                    "Check spelling",
                ],
            )

        # Return formatted carousel
        title = (
            f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} for '{query}'"
        )
        return AgentCarouselResponse(
            message=(
                "Now you have found some options for the user to choose from. You can "
                "add a link to a recommended agent at /marketplace/agent/agent_id. "
                "Please ask the user if they would like to use any of these agents; "
                "if they do, call the get_agent_details tool for that agent."
            ),
            title=title,
            agents=agents,
            count=len(agents),
            session_id=session_id,
        )
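As a usage note: the chat service presumably gathers each tool's `as_openai_tool()` output into the `tools` array of a chat-completion request. Below is a hedged sketch of that wiring; the client setup, model name, and module path are assumptions, and only `as_openai_tool()` itself is defined in this diff.

```python
# Hypothetical wiring sketch; not part of this diff.
from openai import AsyncOpenAI

from .find_agent import FindAgentTool  # module path assumed


async def ask(client: AsyncOpenAI, user_message: str):
    # Each BaseTool subclass renders itself as a ChatCompletionToolParam
    tools = [FindAgentTool().as_openai_tool()]
    return await client.chat.completions.create(
        model="gpt-4o-mini",  # assumed model choice
        messages=[{"role": "user", "content": user_message}],
        tools=tools,
    )
```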
@@ -0,0 +1,175 @@
"""Pydantic models for tool responses."""

from enum import Enum
from typing import Any

from pydantic import BaseModel, Field

from backend.data.model import CredentialsMetaInput


class ResponseType(str, Enum):
    """Types of tool responses."""

    AGENT_CAROUSEL = "agent_carousel"
    AGENT_DETAILS = "agent_details"
    SETUP_REQUIREMENTS = "setup_requirements"
    EXECUTION_STARTED = "execution_started"
    NEED_LOGIN = "need_login"
    ERROR = "error"
    NO_RESULTS = "no_results"
    SUCCESS = "success"


# Base response model
class ToolResponseBase(BaseModel):
    """Base model for all tool responses."""

    type: ResponseType
    message: str
    session_id: str | None = None


# Agent discovery models
class AgentInfo(BaseModel):
    """Information about an agent."""

    id: str
    name: str
    description: str
    source: str = Field(description="marketplace or library")
    in_library: bool = False
    creator: str | None = None
    category: str | None = None
    rating: float | None = None
    runs: int | None = None
    is_featured: bool | None = None
    status: str | None = None
    can_access_graph: bool | None = None
    has_external_trigger: bool | None = None
    new_output: bool | None = None
    graph_id: str | None = None


class AgentCarouselResponse(ToolResponseBase):
    """Response for find_agent tool."""

    type: ResponseType = ResponseType.AGENT_CAROUSEL
    title: str = "Available Agents"
    agents: list[AgentInfo]
    count: int
    name: str = "agent_carousel"


class NoResultsResponse(ToolResponseBase):
    """Response when no agents found."""

    type: ResponseType = ResponseType.NO_RESULTS
    suggestions: list[str] = []
    name: str = "no_results"


# Agent details models
class InputField(BaseModel):
    """Input field specification."""

    name: str
    type: str = "string"
    description: str = ""
    required: bool = False
    default: Any | None = None
    options: list[Any] | None = None
    format: str | None = None


class ExecutionOptions(BaseModel):
    """Available execution options for an agent."""

    manual: bool = True
    scheduled: bool = True
    webhook: bool = False


class AgentDetails(BaseModel):
    """Detailed agent information."""

    id: str
    name: str
    description: str
    in_library: bool = False
    inputs: dict[str, Any] = {}
    credentials: list[CredentialsMetaInput] = []
    execution_options: ExecutionOptions = Field(default_factory=ExecutionOptions)
    trigger_info: dict[str, Any] | None = None


class AgentDetailsResponse(ToolResponseBase):
    """Response for get_details action."""

    type: ResponseType = ResponseType.AGENT_DETAILS
    agent: AgentDetails
    user_authenticated: bool = False
    graph_id: str | None = None
    graph_version: int | None = None


# Setup info models
class UserReadiness(BaseModel):
    """User readiness status."""

    has_all_credentials: bool = False
    missing_credentials: dict[str, Any] = {}
    ready_to_run: bool = False


class SetupInfo(BaseModel):
    """Complete setup information."""

    agent_id: str
    agent_name: str
    requirements: dict[str, list[Any]] = Field(
        default_factory=lambda: {
            "credentials": [],
            "inputs": [],
            "execution_modes": [],
        },
    )
    user_readiness: UserReadiness = Field(default_factory=UserReadiness)


class SetupRequirementsResponse(ToolResponseBase):
    """Response for validate action."""

    type: ResponseType = ResponseType.SETUP_REQUIREMENTS
    setup_info: SetupInfo
    graph_id: str | None = None
    graph_version: int | None = None


# Execution models
class ExecutionStartedResponse(ToolResponseBase):
    """Response for run/schedule actions."""

    type: ResponseType = ResponseType.EXECUTION_STARTED
    execution_id: str
    graph_id: str
    graph_name: str
    library_agent_id: str | None = None
    library_agent_link: str | None = None
    status: str = "QUEUED"


# Auth/error models
class NeedLoginResponse(ToolResponseBase):
    """Response when login is needed."""

    type: ResponseType = ResponseType.NEED_LOGIN
    agent_info: dict[str, Any] | None = None


class ErrorResponse(ToolResponseBase):
    """Response for errors."""

    type: ResponseType = ResponseType.ERROR
    error: str | None = None
    details: dict[str, Any] | None = None
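To illustrate how these models are consumed: a tool returns one of these typed responses, and `BaseTool.execute` serializes it with `model_dump_json()` before streaming. A small sketch follows; the field values are placeholders.

```python
# Sketch: constructing and serializing a tool response (values are placeholders).
from .models import AgentCarouselResponse, AgentInfo

resp = AgentCarouselResponse(
    message="Found one option.",
    agents=[
        AgentInfo(
            id="alice/demo-agent",
            name="Demo Agent",
            description="Example entry",
            source="marketplace",
        )
    ],
    count=1,
    session_id="session-123",
)
# The fixed `type` field discriminates the payload for the frontend:
print(resp.model_dump_json())
# -> {"type":"agent_carousel","message":"Found one option.",...}
```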
@@ -0,0 +1,501 @@
"""Unified tool for agent operations with automatic state detection."""

import logging
from typing import Any

from pydantic import BaseModel, Field, field_validator

from backend.api.features.chat.config import ChatConfig
from backend.api.features.chat.model import ChatSession
from backend.data.graph import GraphModel
from backend.data.model import CredentialsMetaInput
from backend.data.user import get_user_by_id
from backend.executor import utils as execution_utils
from backend.util.clients import get_scheduler_client
from backend.util.exceptions import DatabaseError, NotFoundError
from backend.util.timezone_utils import (
    convert_utc_time_to_user_timezone,
    get_user_timezone_or_utc,
)

from .base import BaseTool
from .models import (
    AgentDetails,
    AgentDetailsResponse,
    ErrorResponse,
    ExecutionOptions,
    ExecutionStartedResponse,
    SetupInfo,
    SetupRequirementsResponse,
    ToolResponseBase,
    UserReadiness,
)
from .utils import (
    check_user_has_required_credentials,
    extract_credentials_from_schema,
    fetch_graph_from_store_slug,
    get_or_create_library_agent,
    match_user_credentials_to_graph,
)

logger = logging.getLogger(__name__)
config = ChatConfig()

# Constants for response messages
MSG_DO_NOT_RUN_AGAIN = "Do not run again unless explicitly requested."
MSG_DO_NOT_SCHEDULE_AGAIN = "Do not schedule again unless explicitly requested."
MSG_ASK_USER_FOR_VALUES = (
    "Ask the user what values to use, or call again with use_defaults=true "
    "to run with default values."
)
MSG_WHAT_VALUES_TO_USE = (
    "What values would you like to use, or would you like to run with defaults?"
)


class RunAgentInput(BaseModel):
    """Input parameters for the run_agent tool."""

    username_agent_slug: str = ""
    inputs: dict[str, Any] = Field(default_factory=dict)
    use_defaults: bool = False
    schedule_name: str = ""
    cron: str = ""
    timezone: str = "UTC"

    @field_validator(
        "username_agent_slug", "schedule_name", "cron", "timezone", mode="before"
    )
    @classmethod
    def strip_strings(cls, v: Any) -> Any:
        """Strip whitespace from string fields."""
        return v.strip() if isinstance(v, str) else v


class RunAgentTool(BaseTool):
    """Unified tool for agent operations with automatic state detection.

    The tool automatically determines what to do based on provided parameters:
    1. Fetches agent details (always, silently)
    2. Checks if required inputs are provided
    3. Checks if the user has the required credentials
    4. Runs immediately OR schedules (if cron is provided)

    The response tells the caller what's missing or confirms execution.
    """

    @property
    def name(self) -> str:
        return "run_agent"

    @property
    def description(self) -> str:
        return """Run or schedule an agent from the marketplace.

The tool automatically handles the setup flow:
- Returns missing inputs if required fields are not provided
- Returns missing credentials if the user needs to configure them
- Executes immediately if all requirements are met
- Schedules execution if a cron expression is provided

For scheduled execution, provide: schedule_name, cron, and optionally timezone."""

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "username_agent_slug": {
                    "type": "string",
                    "description": "Agent identifier in format 'username/agent-name'",
                },
                "inputs": {
                    "type": "object",
                    "description": "Input values for the agent",
                    "additionalProperties": True,
                },
                "use_defaults": {
                    "type": "boolean",
                    "description": "Set to true to run with default values (user must confirm)",
                },
                "schedule_name": {
                    "type": "string",
                    "description": "Name for scheduled execution (triggers scheduling mode)",
                },
                "cron": {
                    "type": "string",
                    "description": "Cron expression (5 fields: min hour day month weekday)",
                },
                "timezone": {
                    "type": "string",
                    "description": "IANA timezone for schedule (default: UTC)",
                },
            },
            "required": ["username_agent_slug"],
        }

    @property
    def requires_auth(self) -> bool:
        """All operations require authentication."""
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Execute the tool with automatic state detection."""
        params = RunAgentInput(**kwargs)
        session_id = session.session_id

        # Validate agent slug format
        if not params.username_agent_slug or "/" not in params.username_agent_slug:
            return ErrorResponse(
                message="Please provide an agent slug in format 'username/agent-name'",
                session_id=session_id,
            )

        # Auth is required
        if not user_id:
            return ErrorResponse(
                message="Authentication required. Please sign in to use this tool.",
                session_id=session_id,
            )

        # Determine if this is a schedule request
        is_schedule = bool(params.schedule_name or params.cron)

        try:
            # Step 1: Fetch agent details (always happens first)
            username, agent_name = params.username_agent_slug.split("/", 1)
            graph, store_agent = await fetch_graph_from_store_slug(username, agent_name)

            if not graph:
                return ErrorResponse(
                    message=f"Agent '{params.username_agent_slug}' not found in marketplace",
                    session_id=session_id,
                )

            # Step 2: Check credentials
            graph_credentials, missing_creds = await match_user_credentials_to_graph(
                user_id, graph
            )

            if missing_creds:
                # Return credentials-needed response with input data info.
                # The UI handles credential setup automatically, so the message
                # focuses on asking about input data.
                credentials = extract_credentials_from_schema(
                    graph.credentials_input_schema
                )
                missing_creds_check = await check_user_has_required_credentials(
                    user_id, credentials
                )
                missing_credentials_dict = {
                    c.id: c.model_dump() for c in missing_creds_check
                }

                return SetupRequirementsResponse(
                    message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
                    session_id=session_id,
                    setup_info=SetupInfo(
                        agent_id=graph.id,
                        agent_name=graph.name,
                        user_readiness=UserReadiness(
                            has_all_credentials=False,
                            missing_credentials=missing_credentials_dict,
                            ready_to_run=False,
                        ),
                        requirements={
                            "credentials": [c.model_dump() for c in credentials],
                            "inputs": self._get_inputs_list(graph.input_schema),
                            "execution_modes": self._get_execution_modes(graph),
                        },
                    ),
                    graph_id=graph.id,
                    graph_version=graph.version,
                )

            # Step 3: Check inputs
            # Get all available input fields from the schema
            input_properties = graph.input_schema.get("properties", {})
            required_fields = set(graph.input_schema.get("required", []))
            provided_inputs = set(params.inputs.keys())

            # If the agent has inputs but none were provided AND use_defaults is
            # not set, always show what's available first so the user can decide
            if input_properties and not provided_inputs and not params.use_defaults:
                credentials = extract_credentials_from_schema(
                    graph.credentials_input_schema
                )
                return AgentDetailsResponse(
                    message=self._build_inputs_message(graph, MSG_ASK_USER_FOR_VALUES),
                    session_id=session_id,
                    agent=self._build_agent_details(graph, credentials),
                    user_authenticated=True,
                    graph_id=graph.id,
                    graph_version=graph.version,
                )

            # Check if required inputs are missing (and not using defaults)
            missing_inputs = required_fields - provided_inputs

            if missing_inputs and not params.use_defaults:
                # Return agent details with missing inputs info
                credentials = extract_credentials_from_schema(
                    graph.credentials_input_schema
                )
                return AgentDetailsResponse(
                    message=(
                        f"Agent '{graph.name}' is missing required inputs: "
                        f"{', '.join(missing_inputs)}. "
                        "Please provide these values to run the agent."
                    ),
                    session_id=session_id,
                    agent=self._build_agent_details(graph, credentials),
                    user_authenticated=True,
                    graph_id=graph.id,
                    graph_version=graph.version,
                )

            # Step 4: Execute or Schedule
            if is_schedule:
                return await self._schedule_agent(
                    user_id=user_id,
                    session=session,
                    graph=graph,
                    graph_credentials=graph_credentials,
                    inputs=params.inputs,
                    schedule_name=params.schedule_name,
                    cron=params.cron,
                    timezone=params.timezone,
                )
            else:
                return await self._run_agent(
                    user_id=user_id,
                    session=session,
                    graph=graph,
                    graph_credentials=graph_credentials,
                    inputs=params.inputs,
                )

        except NotFoundError as e:
            return ErrorResponse(
                message=f"Agent '{params.username_agent_slug}' not found",
                error=str(e) if str(e) else "not_found",
                session_id=session_id,
            )
        except DatabaseError as e:
            logger.error(f"Database error: {e}", exc_info=True)
            return ErrorResponse(
                message=f"Failed to process request: {e!s}",
                error=str(e),
                session_id=session_id,
            )
        except Exception as e:
            logger.error(f"Error processing agent request: {e}", exc_info=True)
            return ErrorResponse(
                message=f"Failed to process request: {e!s}",
                error=str(e),
                session_id=session_id,
            )

    def _get_inputs_list(self, input_schema: dict[str, Any]) -> list[dict[str, Any]]:
        """Extract inputs list from schema."""
        inputs_list = []
        if isinstance(input_schema, dict) and "properties" in input_schema:
            for field_name, field_schema in input_schema["properties"].items():
                inputs_list.append(
                    {
                        "name": field_name,
                        "title": field_schema.get("title", field_name),
                        "type": field_schema.get("type", "string"),
                        "description": field_schema.get("description", ""),
                        "required": field_name in input_schema.get("required", []),
                    }
                )
        return inputs_list

    def _get_execution_modes(self, graph: GraphModel) -> list[str]:
        """Get available execution modes for the graph."""
        trigger_info = graph.trigger_setup_info
        if trigger_info is None:
            return ["manual", "scheduled"]
        return ["webhook"]

    def _build_inputs_message(
        self,
        graph: GraphModel,
        suffix: str,
    ) -> str:
        """Build a message describing available inputs for an agent."""
        inputs_list = self._get_inputs_list(graph.input_schema)
        required_names = [i["name"] for i in inputs_list if i["required"]]
        optional_names = [i["name"] for i in inputs_list if not i["required"]]

        message_parts = [f"Agent '{graph.name}' accepts the following inputs:"]
        if required_names:
            message_parts.append(f"Required: {', '.join(required_names)}.")
        if optional_names:
            message_parts.append(
                f"Optional (have defaults): {', '.join(optional_names)}."
            )
        if not inputs_list:
            message_parts = [f"Agent '{graph.name}' has no required inputs."]
        message_parts.append(suffix)

        return " ".join(message_parts)

    def _build_agent_details(
        self,
        graph: GraphModel,
        credentials: list[CredentialsMetaInput],
    ) -> AgentDetails:
        """Build AgentDetails from a graph."""
        trigger_info = (
            graph.trigger_setup_info.model_dump() if graph.trigger_setup_info else None
        )
        return AgentDetails(
            id=graph.id,
            name=graph.name,
            description=graph.description,
            inputs=graph.input_schema,
            credentials=credentials,
            execution_options=ExecutionOptions(
                manual=trigger_info is None,
                scheduled=trigger_info is None,
                webhook=trigger_info is not None,
            ),
            trigger_info=trigger_info,
        )

    async def _run_agent(
        self,
        user_id: str,
        session: ChatSession,
        graph: GraphModel,
        graph_credentials: dict[str, CredentialsMetaInput],
        inputs: dict[str, Any],
    ) -> ToolResponseBase:
        """Execute an agent immediately."""
        session_id = session.session_id

        # Check rate limits
        if session.successful_agent_runs.get(graph.id, 0) >= config.max_agent_runs:
            return ErrorResponse(
                message="Maximum agent runs reached for this session. Please try again later.",
                session_id=session_id,
            )

        # Get or create library agent
        library_agent = await get_or_create_library_agent(graph, user_id)

        # Execute
        execution = await execution_utils.add_graph_execution(
            graph_id=library_agent.graph_id,
            user_id=user_id,
            inputs=inputs,
            graph_credentials_inputs=graph_credentials,
        )

        # Track successful run
        session.successful_agent_runs[library_agent.graph_id] = (
            session.successful_agent_runs.get(library_agent.graph_id, 0) + 1
        )

        library_agent_link = f"/library/agents/{library_agent.id}"
        return ExecutionStartedResponse(
            message=(
                f"Agent '{library_agent.name}' execution started successfully. "
                f"View at {library_agent_link}. "
                f"{MSG_DO_NOT_RUN_AGAIN}"
            ),
            session_id=session_id,
            execution_id=execution.id,
            graph_id=library_agent.graph_id,
            graph_name=library_agent.name,
            library_agent_id=library_agent.id,
            library_agent_link=library_agent_link,
        )

    async def _schedule_agent(
        self,
        user_id: str,
        session: ChatSession,
        graph: GraphModel,
        graph_credentials: dict[str, CredentialsMetaInput],
        inputs: dict[str, Any],
        schedule_name: str,
        cron: str,
        timezone: str,
    ) -> ToolResponseBase:
        """Set up scheduled execution for an agent."""
        session_id = session.session_id

        # Validate schedule params
        if not schedule_name:
            return ErrorResponse(
                message="schedule_name is required for scheduled execution",
                session_id=session_id,
            )
        if not cron:
            return ErrorResponse(
                message="cron expression is required for scheduled execution",
                session_id=session_id,
            )

        # Check rate limits
        if (
            session.successful_agent_schedules.get(graph.id, 0)
            >= config.max_agent_schedules
        ):
            return ErrorResponse(
                message="Maximum agent schedules reached for this session.",
                session_id=session_id,
            )

        # Get or create library agent
        library_agent = await get_or_create_library_agent(graph, user_id)

        # Get user timezone (falls back to the provided timezone if the user
        # record is unavailable)
        user = await get_user_by_id(user_id)
        user_timezone = get_user_timezone_or_utc(user.timezone if user else timezone)

        # Create schedule
        result = await get_scheduler_client().add_execution_schedule(
            user_id=user_id,
            graph_id=library_agent.graph_id,
            graph_version=library_agent.graph_version,
            name=schedule_name,
            cron=cron,
            input_data=inputs,
            input_credentials=graph_credentials,
            user_timezone=user_timezone,
        )

        # Convert next_run_time to user timezone for display
        if result.next_run_time:
            result.next_run_time = convert_utc_time_to_user_timezone(
                result.next_run_time, user_timezone
            )

        # Track successful schedule
        session.successful_agent_schedules[library_agent.graph_id] = (
            session.successful_agent_schedules.get(library_agent.graph_id, 0) + 1
        )

        library_agent_link = f"/library/agents/{library_agent.id}"
        return ExecutionStartedResponse(
            message=(
                f"Agent '{library_agent.name}' scheduled successfully as '{schedule_name}'. "
                f"View at {library_agent_link}. "
                f"{MSG_DO_NOT_SCHEDULE_AGAIN}"
            ),
            session_id=session_id,
            execution_id=result.id,
            graph_id=library_agent.graph_id,
            graph_name=library_agent.name,
            library_agent_id=library_agent.id,
            library_agent_link=library_agent_link,
        )
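For concreteness, a hedged invocation sketch of the scheduling path. The user ID, slug, and input values are placeholders, and `make_session` is borrowed from the test helpers below; only the parameter names come from the tool's schema above.

```python
# Hypothetical call sketch: schedule a marketplace agent for 9am daily.
import uuid


async def schedule_example() -> None:
    result = await RunAgentTool().execute(
        user_id="user-123",  # placeholder authenticated user
        session=make_session(user_id="user-123"),  # helper from the tests below
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="alice/daily-digest",  # placeholder slug
        inputs={"topic": "AI news"},  # placeholder input values
        schedule_name="Morning digest",
        cron="0 9 * * *",  # 5 fields: min hour day month weekday
        timezone="Europe/Berlin",  # IANA timezone
    )
```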
@@ -0,0 +1,391 @@
import uuid

import orjson
import pytest

from ._test_data import (
    make_session,
    setup_firecrawl_test_data,
    setup_llm_test_data,
    setup_test_data,
)
from .run_agent import RunAgentTool

# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
setup_firecrawl_test_data = setup_firecrawl_test_data


@pytest.mark.asyncio(scope="session")
async def test_run_agent(setup_test_data):
    """Test that the run_agent tool successfully executes an approved agent"""
    # Use test data from fixture
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    store_submission = setup_test_data["store_submission"]

    # Create the tool instance
    tool = RunAgentTool()

    # Build the proper marketplace agent_id format: username/slug
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"

    # Build the session
    session = make_session(user_id=user.id)

    # Execute the tool
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={"test_input": "Hello World"},
        session=session,
    )

    # Verify the response
    assert response is not None
    assert hasattr(response, "result")

    # Parse the result JSON to verify the execution started
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    assert "execution_id" in result_data
    assert "graph_id" in result_data
    assert result_data["graph_id"] == graph.id
    assert "graph_name" in result_data
    assert result_data["graph_name"] == "Test Agent"


@pytest.mark.asyncio(scope="session")
async def test_run_agent_missing_inputs(setup_test_data):
    """Test that the run_agent tool returns error when inputs are missing"""
    # Use test data from fixture
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]

    # Create the tool instance
    tool = RunAgentTool()

    # Build the proper marketplace agent_id format
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"

    # Build the session
    session = make_session(user_id=user.id)

    # Execute the tool without required inputs
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={},  # Missing required input
        session=session,
    )

    # Verify that we get a response describing the problem
    assert response is not None
    assert hasattr(response, "result")
    # The tool should respond with a message describing the missing/required inputs

    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    assert "message" in result_data


@pytest.mark.asyncio(scope="session")
async def test_run_agent_invalid_agent_id(setup_test_data):
    """Test that the run_agent tool returns error for invalid agent ID"""
    # Use test data from fixture
    user = setup_test_data["user"]

    # Create the tool instance
    tool = RunAgentTool()

    # Build the session
    session = make_session(user_id=user.id)

    # Execute the tool with invalid agent ID
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="invalid/agent-id",
        inputs={"test_input": "Hello World"},
        session=session,
    )

    # Verify that we get an error response
    assert response is not None
    assert hasattr(response, "result")

    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)
    assert "message" in result_data
    # Should get an error about failed setup or not found
    assert any(
        phrase in result_data["message"].lower() for phrase in ["not found", "failed"]
    )


@pytest.mark.asyncio(scope="session")
async def test_run_agent_with_llm_credentials(setup_llm_test_data):
    """Test that run_agent works with an agent requiring LLM credentials"""
    # Use test data from fixture
    user = setup_llm_test_data["user"]
    graph = setup_llm_test_data["graph"]
    store_submission = setup_llm_test_data["store_submission"]

    # Create the tool instance
    tool = RunAgentTool()

    # Build the proper marketplace agent_id format
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"

    # Build the session
    session = make_session(user_id=user.id)

    # Execute the tool with a prompt for the LLM
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={"user_prompt": "What is 2+2?"},
        session=session,
    )

    # Verify the response
    assert response is not None
    assert hasattr(response, "result")

    # Parse the result JSON to verify the execution started
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Should successfully start execution since credentials are available
    assert "execution_id" in result_data
    assert "graph_id" in result_data
    assert result_data["graph_id"] == graph.id
    assert "graph_name" in result_data
    assert result_data["graph_name"] == "LLM Test Agent"


@pytest.mark.asyncio(scope="session")
async def test_run_agent_shows_available_inputs_when_none_provided(setup_test_data):
    """Test that run_agent returns available inputs when called without inputs or use_defaults."""
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]

    tool = RunAgentTool()
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    session = make_session(user_id=user.id)

    # Execute without inputs and without use_defaults
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={},
        use_defaults=False,
        session=session,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Should return agent_details type showing available inputs
    assert result_data.get("type") == "agent_details"
    assert "agent" in result_data
    assert "message" in result_data
    # Message should mention inputs
    assert "inputs" in result_data["message"].lower()


@pytest.mark.asyncio(scope="session")
async def test_run_agent_with_use_defaults(setup_test_data):
    """Test that run_agent executes successfully with use_defaults=True."""
    user = setup_test_data["user"]
    graph = setup_test_data["graph"]
    store_submission = setup_test_data["store_submission"]

    tool = RunAgentTool()
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    session = make_session(user_id=user.id)

    # Execute with use_defaults=True (no explicit inputs)
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={},
        use_defaults=True,
        session=session,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Should execute successfully
    assert "execution_id" in result_data
    assert result_data["graph_id"] == graph.id


@pytest.mark.asyncio(scope="session")
async def test_run_agent_missing_credentials(setup_firecrawl_test_data):
    """Test that run_agent returns setup_requirements when credentials are missing."""
    user = setup_firecrawl_test_data["user"]
    store_submission = setup_firecrawl_test_data["store_submission"]

    tool = RunAgentTool()
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    session = make_session(user_id=user.id)

    # Execute - user doesn't have Firecrawl credentials
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={"url": "https://example.com"},
        session=session,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Should return setup_requirements type with missing credentials
    assert result_data.get("type") == "setup_requirements"
    assert "setup_info" in result_data
    setup_info = result_data["setup_info"]
    assert "user_readiness" in setup_info
    assert setup_info["user_readiness"]["has_all_credentials"] is False
    assert len(setup_info["user_readiness"]["missing_credentials"]) > 0


@pytest.mark.asyncio(scope="session")
async def test_run_agent_invalid_slug_format(setup_test_data):
    """Test that run_agent returns error for invalid slug format (no slash)."""
    user = setup_test_data["user"]

    tool = RunAgentTool()
    session = make_session(user_id=user.id)

    # Execute with invalid slug format
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="no-slash-here",
        inputs={},
        session=session,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Should return error
    assert result_data.get("type") == "error"
    assert "username/agent-name" in result_data["message"]


@pytest.mark.asyncio(scope="session")
async def test_run_agent_unauthenticated():
    """Test that run_agent returns need_login for unauthenticated users."""
    tool = RunAgentTool()
    session = make_session(user_id=None)

    # Execute without user_id
    response = await tool.execute(
        user_id=None,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug="test/test-agent",
        inputs={},
        session=session,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Base tool returns need_login type for unauthenticated users
    assert result_data.get("type") == "need_login"
    assert "sign in" in result_data["message"].lower()


@pytest.mark.asyncio(scope="session")
async def test_run_agent_schedule_without_cron(setup_test_data):
    """Test that run_agent returns error when scheduling without cron expression."""
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]

    tool = RunAgentTool()
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    session = make_session(user_id=user.id)

    # Try to schedule without cron
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={"test_input": "test"},
        schedule_name="My Schedule",
        cron="",  # Empty cron
        session=session,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Should return error about missing cron
    assert result_data.get("type") == "error"
    assert "cron" in result_data["message"].lower()


@pytest.mark.asyncio(scope="session")
async def test_run_agent_schedule_without_name(setup_test_data):
    """Test that run_agent returns error when scheduling without schedule_name."""
    user = setup_test_data["user"]
    store_submission = setup_test_data["store_submission"]

    tool = RunAgentTool()
    agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
    session = make_session(user_id=user.id)

    # Try to schedule without schedule_name
    response = await tool.execute(
        user_id=user.id,
        session_id=str(uuid.uuid4()),
        tool_call_id=str(uuid.uuid4()),
        username_agent_slug=agent_marketplace_id,
        inputs={"test_input": "test"},
        schedule_name="",  # Empty name
        cron="0 9 * * *",
        session=session,
    )

    assert response is not None
    assert hasattr(response, "result")
    assert isinstance(response.result, str)
    result_data = orjson.loads(response.result)

    # Should return error about missing schedule_name
    assert result_data.get("type") == "error"
    assert "schedule_name" in result_data["message"].lower()
@@ -0,0 +1,288 @@
|
||||
"""Shared utilities for chat tools."""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from backend.api.features.library import db as library_db
|
||||
from backend.api.features.library import model as library_model
|
||||
from backend.api.features.store import db as store_db
|
||||
from backend.data import graph as graph_db
|
||||
from backend.data.graph import GraphModel
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.util.exceptions import NotFoundError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def fetch_graph_from_store_slug(
|
||||
username: str,
|
||||
agent_name: str,
|
||||
) -> tuple[GraphModel | None, Any | None]:
|
||||
"""
|
||||
Fetch graph from store by username/agent_name slug.
|
||||
|
||||
Args:
|
||||
username: Creator's username
|
||||
agent_name: Agent name/slug
|
||||
|
||||
Returns:
|
||||
tuple[Graph | None, StoreAgentDetails | None]: The graph and store agent details,
|
||||
or (None, None) if not found.
|
||||
|
||||
Raises:
|
||||
DatabaseError: If there's a database error during lookup.
|
||||
"""
|
||||
try:
|
||||
store_agent = await store_db.get_store_agent_details(username, agent_name)
|
||||
except NotFoundError:
|
||||
return None, None
|
||||
|
||||
# Get the graph from store listing version
|
||||
graph_meta = await store_db.get_available_graph(
|
||||
store_agent.store_listing_version_id
|
||||
)
|
||||
graph = await graph_db.get_graph(
|
||||
graph_id=graph_meta.id,
|
||||
version=graph_meta.version,
|
||||
user_id=None, # Public access
|
||||
include_subgraphs=True,
|
||||
)
|
||||
return graph, store_agent
|
||||
|
||||
|
||||
def extract_credentials_from_schema(
|
||||
credentials_input_schema: dict[str, Any] | None,
|
||||
) -> list[CredentialsMetaInput]:
|
||||
"""
|
||||
Extract credential requirements from graph's credentials_input_schema.
|
||||
|
||||
This consolidates duplicated logic from get_agent_details.py and setup_agent.py.
|
||||
|
||||
Args:
|
||||
credentials_input_schema: The credentials_input_schema from a Graph object
|
||||
|
||||
Returns:
|
||||
List of CredentialsMetaInput with provider and type info
|
||||
"""
|
||||
credentials: list[CredentialsMetaInput] = []
|
||||
|
||||
if (
|
||||
not isinstance(credentials_input_schema, dict)
|
||||
or "properties" not in credentials_input_schema
|
||||
):
|
||||
return credentials
|
||||
|
||||
for cred_name, cred_schema in credentials_input_schema["properties"].items():
|
||||
provider = _extract_provider_from_schema(cred_schema)
|
||||
cred_type = _extract_credential_type_from_schema(cred_schema)
|
||||
|
||||
credentials.append(
|
||||
CredentialsMetaInput(
|
||||
id=cred_name,
|
||||
title=cred_schema.get("title", cred_name),
|
||||
provider=provider, # type: ignore
|
||||
type=cred_type, # type: ignore
|
||||
)
|
||||
)
|
||||
|
||||
return credentials
|
||||
|
||||
|
||||
def extract_credentials_as_dict(
|
||||
credentials_input_schema: dict[str, Any] | None,
|
||||
) -> dict[str, CredentialsMetaInput]:
|
||||
"""
|
||||
Extract credential requirements as a dict keyed by field name.
|
||||
|
||||
Args:
|
||||
credentials_input_schema: The credentials_input_schema from a Graph object
|
||||
|
||||
Returns:
|
||||
Dict mapping field name to CredentialsMetaInput
|
||||
"""
|
||||
credentials: dict[str, CredentialsMetaInput] = {}
|
||||
|
||||
if (
|
||||
not isinstance(credentials_input_schema, dict)
|
||||
or "properties" not in credentials_input_schema
|
||||
):
|
||||
return credentials
|
||||
|
||||
for cred_name, cred_schema in credentials_input_schema["properties"].items():
|
||||
provider = _extract_provider_from_schema(cred_schema)
|
||||
cred_type = _extract_credential_type_from_schema(cred_schema)
|
||||
|
||||
credentials[cred_name] = CredentialsMetaInput(
|
||||
id=cred_name,
|
||||
title=cred_schema.get("title", cred_name),
|
||||
provider=provider, # type: ignore
|
||||
type=cred_type, # type: ignore
|
||||
)
|
||||
|
||||
return credentials
|
||||
|
||||
|
||||
def _extract_provider_from_schema(cred_schema: dict[str, Any]) -> str:
|
||||
"""Extract provider from credential schema."""
|
||||
if "credentials_provider" in cred_schema and cred_schema["credentials_provider"]:
|
||||
return cred_schema["credentials_provider"][0]
|
||||
if "properties" in cred_schema and "provider" in cred_schema["properties"]:
|
||||
return cred_schema["properties"]["provider"].get("const", "unknown")
|
||||
return "unknown"
|
||||
|
||||
|
||||
def _extract_credential_type_from_schema(cred_schema: dict[str, Any]) -> str:
|
||||
"""Extract credential type from credential schema."""
|
||||
if "credentials_types" in cred_schema and cred_schema["credentials_types"]:
|
||||
return cred_schema["credentials_types"][0]
|
||||
if "properties" in cred_schema and "type" in cred_schema["properties"]:
|
||||
return cred_schema["properties"]["type"].get("const", "api_key")
|
||||
return "api_key"
|
||||
|
||||
|
||||
async def get_or_create_library_agent(
|
||||
graph: GraphModel,
|
||||
user_id: str,
|
||||
) -> library_model.LibraryAgent:
|
||||
"""
|
||||
Get existing library agent or create new one.
|
||||
|
||||
This consolidates duplicated logic from run_agent.py and setup_agent.py.
|
||||
|
||||
Args:
|
||||
graph: The Graph to add to library
|
||||
user_id: The user's ID
|
||||
|
||||
Returns:
|
||||
LibraryAgent instance
|
||||
"""
|
||||
existing = await library_db.get_library_agent_by_graph_id(
|
||||
graph_id=graph.id, user_id=user_id
|
||||
)
|
||||
if existing:
|
||||
return existing
|
||||
|
||||
library_agents = await library_db.create_library_agent(
|
||||
graph=graph,
|
||||
user_id=user_id,
|
||||
create_library_agents_for_sub_graphs=False,
|
||||
)
|
||||
assert len(library_agents) == 1, "Expected 1 library agent to be created"
|
||||
return library_agents[0]
|
||||
|
||||
|
||||
async def match_user_credentials_to_graph(
    user_id: str,
    graph: GraphModel,
) -> tuple[dict[str, CredentialsMetaInput], list[str]]:
    """
    Match user's available credentials against graph's required credentials.

    Uses graph.aggregate_credentials_inputs() which handles credentials from
    multiple nodes and uses frozensets for provider matching.

    Args:
        user_id: The user's ID
        graph: The Graph with credential requirements

    Returns:
        tuple[matched_credentials dict, missing_credential_descriptions list]
    """
    graph_credentials_inputs: dict[str, CredentialsMetaInput] = {}
    missing_creds: list[str] = []

    # Get aggregated credentials requirements from the graph
    aggregated_creds = graph.aggregate_credentials_inputs()
    logger.debug(
        f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
    )

    if not aggregated_creds:
        return graph_credentials_inputs, missing_creds

    # Get all available credentials for the user
    creds_manager = IntegrationCredentialsManager()
    available_creds = await creds_manager.store.get_all_creds(user_id)

    # For each required credential field, find a matching user credential.
    # field_info.provider is a frozenset because aggregate_credentials_inputs()
    # combines requirements from multiple nodes. A credential matches if its
    # provider is in the set of acceptable providers.
    for credential_field_name, (
        credential_requirements,
        _node_fields,
    ) in aggregated_creds.items():
        # Find first matching credential by provider and type
        matching_cred = next(
            (
                cred
                for cred in available_creds
                if cred.provider in credential_requirements.provider
                and cred.type in credential_requirements.supported_types
            ),
            None,
        )

        if matching_cred:
            try:
                graph_credentials_inputs[credential_field_name] = CredentialsMetaInput(
                    id=matching_cred.id,
                    provider=matching_cred.provider,  # type: ignore
                    type=matching_cred.type,
                    title=matching_cred.title,
                )
            except Exception as e:
                logger.error(
                    f"Failed to create CredentialsMetaInput for field '{credential_field_name}': "
                    f"provider={matching_cred.provider}, type={matching_cred.type}, "
                    f"credential_id={matching_cred.id}",
                    exc_info=True,
                )
                missing_creds.append(
                    f"{credential_field_name} (validation failed: {e})"
                )
        else:
            missing_creds.append(
                f"{credential_field_name} "
                f"(requires provider in {list(credential_requirements.provider)}, "
                f"type in {list(credential_requirements.supported_types)})"
            )

    logger.info(
        f"Credential matching complete: {len(graph_credentials_inputs)}/{len(aggregated_creds)} matched"
    )

    return graph_credentials_inputs, missing_creds
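A short usage sketch follows; the wrapper function is illustrative only (an assumed caller, not part of this module), showing how the matched/missing tuple is typically consumed before queueing a run.

```python
async def ensure_credentials_for_run(
    user_id: str, graph: GraphModel
) -> dict[str, CredentialsMetaInput]:
    """Illustrative helper: fail fast when any credential field is unmatched."""
    matched, missing = await match_user_credentials_to_graph(user_id, graph)
    if missing:
        # Each entry describes the unmatched field, e.g.
        # "github_creds (requires provider in ['github'], type in ['api_key'])"
        raise ValueError(f"Missing credentials: {'; '.join(missing)}")
    # matched maps credential field name -> CredentialsMetaInput, ready to
    # pass along as the graph's credentials inputs.
    return matched
```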
async def check_user_has_required_credentials(
    user_id: str,
    required_credentials: list[CredentialsMetaInput],
) -> list[CredentialsMetaInput]:
    """
    Check which required credentials the user is missing.

    Args:
        user_id: The user's ID
        required_credentials: List of required credentials

    Returns:
        List of missing credentials (empty if user has all)
    """
    if not required_credentials:
        return []

    creds_manager = IntegrationCredentialsManager()
    available_creds = await creds_manager.store.get_all_creds(user_id)

    missing: list[CredentialsMetaInput] = []
    for required in required_credentials:
        has_matching = any(
            cred.provider == required.provider and cred.type == required.type
            for cred in available_creds
        )
        if not has_matching:
            missing.append(required)

    return missing
@@ -0,0 +1,204 @@
import json
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Union

from prisma.enums import ReviewStatus
from pydantic import BaseModel, Field, field_validator, model_validator

if TYPE_CHECKING:
    from prisma.models import PendingHumanReview

# SafeJson-compatible type alias for review data
SafeJsonData = Union[Dict[str, Any], List[Any], str, int, float, bool, None]


class PendingHumanReviewModel(BaseModel):
    """Response model for pending human review data.

    Represents a human review request that is awaiting user action.
    Contains all necessary information for a user to review and approve
    or reject data from a Human-in-the-Loop block execution.

    Attributes:
        node_exec_id: ID of the node execution that created this review (primary key)
        user_id: ID of the user who must perform the review
        graph_exec_id: ID of the graph execution containing the node
        graph_id: ID of the graph template being executed
        graph_version: Version number of the graph template
        payload: The actual data payload awaiting review
        instructions: Instructions or message for the reviewer
        editable: Whether the reviewer can edit the data
        status: Current review status (WAITING, APPROVED, or REJECTED)
        review_message: Optional message from the reviewer
        was_edited: Whether the data was modified during review
        processed: Whether the review result has been processed by the execution engine
        created_at: Timestamp when review was created
        updated_at: Timestamp when review was last modified
        reviewed_at: Timestamp when review was completed (if applicable)
    """

    node_exec_id: str = Field(description="Node execution ID (primary key)")
    user_id: str = Field(description="User ID associated with the review")
    graph_exec_id: str = Field(description="Graph execution ID")
    graph_id: str = Field(description="Graph ID")
    graph_version: int = Field(description="Graph version")
    payload: SafeJsonData = Field(description="The actual data payload awaiting review")
    instructions: str | None = Field(
        description="Instructions or message for the reviewer", default=None
    )
    editable: bool = Field(description="Whether the reviewer can edit the data")
    status: ReviewStatus = Field(description="Review status")
    review_message: str | None = Field(
        description="Optional message from the reviewer", default=None
    )
    was_edited: bool | None = Field(
        description="Whether the data was modified during review", default=None
    )
    processed: bool = Field(
        description="Whether the review result has been processed by the execution engine",
        default=False,
    )
    created_at: datetime = Field(description="When the review was created")
    updated_at: datetime | None = Field(
        description="When the review was last updated", default=None
    )
    reviewed_at: datetime | None = Field(
        description="When the review was completed", default=None
    )

    @classmethod
    def from_db(cls, review: "PendingHumanReview") -> "PendingHumanReviewModel":
        """
        Convert a database model to a response model.

        Uses the new flat database structure with separate columns for
        payload, instructions, and editable flag.

        Handles invalid data gracefully by using safe defaults.
        """
        return cls(
            node_exec_id=review.nodeExecId,
            user_id=review.userId,
            graph_exec_id=review.graphExecId,
            graph_id=review.graphId,
            graph_version=review.graphVersion,
            payload=review.payload,
            instructions=review.instructions,
            editable=review.editable,
            status=review.status,
            review_message=review.reviewMessage,
            was_edited=review.wasEdited,
            processed=review.processed,
            created_at=review.createdAt,
            updated_at=review.updatedAt,
            reviewed_at=review.reviewedAt,
        )
class ReviewItem(BaseModel):
    """Single review item for processing."""

    node_exec_id: str = Field(description="Node execution ID to review")
    approved: bool = Field(
        description="Whether this review is approved (True) or rejected (False)"
    )
    message: str | None = Field(
        None, description="Optional review message", max_length=2000
    )
    reviewed_data: SafeJsonData | None = Field(
        None, description="Optional edited data (ignored if approved=False)"
    )

    @field_validator("reviewed_data")
    @classmethod
    def validate_reviewed_data(cls, v):
        """Validate that reviewed_data is safe and properly structured."""
        if v is None:
            return v

        # Validate SafeJson compatibility
        def validate_safejson_type(obj):
            """Ensure object only contains SafeJson compatible types."""
            if obj is None:
                return True
            elif isinstance(obj, (str, int, float, bool)):
                return True
            elif isinstance(obj, dict):
                return all(
                    isinstance(k, str) and validate_safejson_type(v)
                    for k, v in obj.items()
                )
            elif isinstance(obj, list):
                return all(validate_safejson_type(item) for item in obj)
            else:
                return False

        if not validate_safejson_type(v):
            raise ValueError("reviewed_data contains non-SafeJson compatible types")

        # Validate data size to prevent DoS attacks. Serialize inside the try
        # block, but apply the size limit outside it, so the size error is not
        # re-wrapped as a serialization failure.
        try:
            json_str = json.dumps(v)
        except (TypeError, ValueError) as e:
            raise ValueError(f"reviewed_data must be JSON serializable: {str(e)}")
        if len(json_str) > 1000000:  # 1MB limit
            raise ValueError("reviewed_data is too large (max 1MB)")

        # Ensure no dangerous nested structures (prevent infinite recursion)
        def check_depth(obj, max_depth=10, current_depth=0):
            """Recursively check object nesting depth to prevent stack overflow attacks."""
            if current_depth > max_depth:
                raise ValueError("reviewed_data has excessive nesting depth")

            if isinstance(obj, dict):
                for value in obj.values():
                    check_depth(value, max_depth, current_depth + 1)
            elif isinstance(obj, list):
                for item in obj:
                    check_depth(item, max_depth, current_depth + 1)

        check_depth(v)
        return v

    @field_validator("message")
    @classmethod
    def validate_message(cls, v):
        """Validate and sanitize review message."""
        if v is not None and len(v.strip()) == 0:
            return None
        return v
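A quick illustrative check of the guards above, assuming pytest is available. Pydantic v2 surfaces a validator's ValueError as a ValidationError, which itself subclasses ValueError, so the matches below work:

```python
import pytest


def test_reviewed_data_guards() -> None:
    # Non-JSON-safe leaf values are rejected by the SafeJson type check
    with pytest.raises(ValueError, match="non-SafeJson"):
        ReviewItem(node_exec_id="n1", approved=True, reviewed_data={"x": object()})

    # Nesting more than 10 levels deep trips the depth guard
    deep: SafeJsonData = {"value": 1}
    for _ in range(12):
        deep = {"child": deep}
    with pytest.raises(ValueError, match="nesting depth"):
        ReviewItem(node_exec_id="n1", approved=True, reviewed_data=deep)

    # Whitespace-only messages are normalized to None
    item = ReviewItem(node_exec_id="n1", approved=True, message="   ")
    assert item.message is None
```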
class ReviewRequest(BaseModel):
    """Request model for processing ALL pending reviews for an execution.

    This request must include ALL pending reviews for a graph execution.
    Each review will be either approved (with optional data modifications)
    or rejected (data ignored). The execution will resume only after ALL reviews are processed.
    """

    reviews: List[ReviewItem] = Field(
        description="All reviews with their approval status, data, and messages"
    )

    @model_validator(mode="after")
    def validate_review_completeness(self):
        """Validate that we have at least one review to process and no duplicates."""
        if not self.reviews:
            raise ValueError("At least one review must be provided")

        # Ensure no duplicate node_exec_ids
        node_ids = [review.node_exec_id for review in self.reviews]
        if len(node_ids) != len(set(node_ids)):
            duplicates = [nid for nid in set(node_ids) if node_ids.count(nid) > 1]
            raise ValueError(f"Duplicate review IDs found: {', '.join(duplicates)}")

        return self
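A small sketch of the completeness validator in action; the duplicate-ID message raised above is carried inside Pydantic's ValidationError:

```python
from pydantic import ValidationError

try:
    ReviewRequest(
        reviews=[
            ReviewItem(node_exec_id="dup", approved=True),
            ReviewItem(node_exec_id="dup", approved=False),
        ]
    )
except ValidationError as e:
    print(e)  # message includes "Duplicate review IDs found: dup"
```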
class ReviewResponse(BaseModel):
    """Response from review endpoint."""

    approved_count: int = Field(description="Number of reviews successfully approved")
    rejected_count: int = Field(description="Number of reviews successfully rejected")
    failed_count: int = Field(description="Number of reviews that failed processing")
    error: str | None = Field(None, description="Error message if operation failed")
@@ -0,0 +1,492 @@
import datetime

import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from prisma.enums import ReviewStatus
from pytest_snapshot.plugin import Snapshot

from backend.api.rest_api import handle_internal_http_error

from .model import PendingHumanReviewModel
from .routes import router

# Using a fixed timestamp for reproducible tests
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)

app = fastapi.FastAPI()
app.include_router(router, prefix="/api/review")
app.add_exception_handler(ValueError, handle_internal_http_error(400))

client = fastapi.testclient.TestClient(app)


@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
    """Setup auth overrides for all tests in this module"""
    from autogpt_libs.auth.jwt_utils import get_jwt_payload

    app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
    yield
    app.dependency_overrides.clear()


@pytest.fixture
def sample_pending_review(test_user_id: str) -> PendingHumanReviewModel:
    """Create a sample pending review for testing"""
    return PendingHumanReviewModel(
        node_exec_id="test_node_123",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "test payload", "value": 42},
        instructions="Please review this data",
        editable=True,
        status=ReviewStatus.WAITING,
        review_message=None,
        was_edited=None,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=None,
        reviewed_at=None,
    )


def test_get_pending_reviews_empty(
    mocker: pytest_mock.MockerFixture,
    snapshot: Snapshot,
    test_user_id: str,
) -> None:
    """Test getting pending reviews when none exist"""
    mock_get_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_user"
    )
    mock_get_reviews.return_value = []

    response = client.get("/api/review/pending")

    assert response.status_code == 200
    assert response.json() == []
    mock_get_reviews.assert_called_once_with(test_user_id, 1, 25)


def test_get_pending_reviews_with_data(
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    snapshot: Snapshot,
    test_user_id: str,
) -> None:
    """Test getting pending reviews with data"""
    mock_get_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_user"
    )
    mock_get_reviews.return_value = [sample_pending_review]

    response = client.get("/api/review/pending?page=2&page_size=10")

    assert response.status_code == 200
    data = response.json()
    assert len(data) == 1
    assert data[0]["node_exec_id"] == "test_node_123"
    assert data[0]["status"] == "WAITING"
    mock_get_reviews.assert_called_once_with(test_user_id, 2, 10)


def test_get_pending_reviews_for_execution_success(
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    snapshot: Snapshot,
    test_user_id: str,
) -> None:
    """Test getting pending reviews for specific execution"""
    mock_get_graph_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_execution_meta"
    )
    mock_get_graph_execution.return_value = {
        "id": "test_graph_exec_456",
        "user_id": test_user_id,
    }

    mock_get_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
    )
    mock_get_reviews.return_value = [sample_pending_review]

    response = client.get("/api/review/execution/test_graph_exec_456")

    assert response.status_code == 200
    data = response.json()
    assert len(data) == 1
    assert data[0]["graph_exec_id"] == "test_graph_exec_456"


def test_get_pending_reviews_for_execution_not_available(
    mocker: pytest_mock.MockerFixture,
) -> None:
    """Test access denied when user doesn't own the execution"""
    mock_get_graph_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_graph_execution_meta"
    )
    mock_get_graph_execution.return_value = None

    response = client.get("/api/review/execution/test_graph_exec_456")

    assert response.status_code == 404
    assert "not found" in response.json()["detail"]


def test_process_review_action_approve_success(
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    test_user_id: str,
) -> None:
    """Test successful review approval"""
    # Mock the route functions

    mock_get_reviews_for_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
    )
    mock_get_reviews_for_execution.return_value = [sample_pending_review]

    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    # Create approved review for return
    approved_review = PendingHumanReviewModel(
        node_exec_id="test_node_123",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "modified payload", "value": 50},
        instructions="Please review this data",
        editable=True,
        status=ReviewStatus.APPROVED,
        review_message="Looks good",
        was_edited=True,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=FIXED_NOW,
        reviewed_at=FIXED_NOW,
    )
    mock_process_all_reviews.return_value = {"test_node_123": approved_review}

    mock_has_pending = mocker.patch(
        "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
    )
    mock_has_pending.return_value = False

    mocker.patch("backend.api.features.executions.review.routes.add_graph_execution")

    request_data = {
        "reviews": [
            {
                "node_exec_id": "test_node_123",
                "approved": True,
                "message": "Looks good",
                "reviewed_data": {"data": "modified payload", "value": 50},
            }
        ]
    }

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 200
    data = response.json()
    assert data["approved_count"] == 1
    assert data["rejected_count"] == 0
    assert data["failed_count"] == 0
    assert data["error"] is None


def test_process_review_action_reject_success(
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    test_user_id: str,
) -> None:
    """Test successful review rejection"""
    # Mock the route functions

    mock_get_reviews_for_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
    )
    mock_get_reviews_for_execution.return_value = [sample_pending_review]

    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    rejected_review = PendingHumanReviewModel(
        node_exec_id="test_node_123",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "test payload"},
        instructions="Please review",
        editable=True,
        status=ReviewStatus.REJECTED,
        review_message="Rejected by user",
        was_edited=False,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=None,
        reviewed_at=FIXED_NOW,
    )
    mock_process_all_reviews.return_value = {"test_node_123": rejected_review}

    mock_has_pending = mocker.patch(
        "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
    )
    mock_has_pending.return_value = False

    request_data = {
        "reviews": [
            {
                "node_exec_id": "test_node_123",
                "approved": False,
                "message": None,
            }
        ]
    }

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 200
    data = response.json()
    assert data["approved_count"] == 0
    assert data["rejected_count"] == 1
    assert data["failed_count"] == 0
    assert data["error"] is None


def test_process_review_action_mixed_success(
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    test_user_id: str,
) -> None:
    """Test mixed approve/reject operations"""
    # Create a second review
    second_review = PendingHumanReviewModel(
        node_exec_id="test_node_456",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "second payload"},
        instructions="Second review",
        editable=False,
        status=ReviewStatus.WAITING,
        review_message=None,
        was_edited=None,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=None,
        reviewed_at=None,
    )

    # Mock the route functions

    mock_get_reviews_for_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
    )
    mock_get_reviews_for_execution.return_value = [sample_pending_review, second_review]

    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    # Create approved version of first review
    approved_review = PendingHumanReviewModel(
        node_exec_id="test_node_123",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "modified"},
        instructions="Please review",
        editable=True,
        status=ReviewStatus.APPROVED,
        review_message="Approved",
        was_edited=True,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=None,
        reviewed_at=FIXED_NOW,
    )
    # Create rejected version of second review
    rejected_review = PendingHumanReviewModel(
        node_exec_id="test_node_456",
        user_id=test_user_id,
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        payload={"data": "second payload"},
        instructions="Second review",
        editable=False,
        status=ReviewStatus.REJECTED,
        review_message="Rejected by user",
        was_edited=False,
        processed=False,
        created_at=FIXED_NOW,
        updated_at=None,
        reviewed_at=FIXED_NOW,
    )
    mock_process_all_reviews.return_value = {
        "test_node_123": approved_review,
        "test_node_456": rejected_review,
    }

    mock_has_pending = mocker.patch(
        "backend.api.features.executions.review.routes.has_pending_reviews_for_graph_exec"
    )
    mock_has_pending.return_value = False

    request_data = {
        "reviews": [
            {
                "node_exec_id": "test_node_123",
                "approved": True,
                "message": "Approved",
                "reviewed_data": {"data": "modified"},
            },
            {
                "node_exec_id": "test_node_456",
                "approved": False,
                "message": None,
            },
        ]
    }

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 200
    data = response.json()
    assert data["approved_count"] == 1
    assert data["rejected_count"] == 1
    assert data["failed_count"] == 0
    assert data["error"] is None


def test_process_review_action_empty_request(
    mocker: pytest_mock.MockerFixture,
    test_user_id: str,
) -> None:
    """Test error when no reviews provided"""
    request_data = {"reviews": []}

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 422
    response_data = response.json()
    # Pydantic validation error format
    assert isinstance(response_data["detail"], list)
    assert len(response_data["detail"]) > 0
    assert "At least one review must be provided" in response_data["detail"][0]["msg"]


def test_process_review_action_review_not_found(
    mocker: pytest_mock.MockerFixture,
    test_user_id: str,
) -> None:
    """Test error when review is not found"""
    # Mock the functions that extract graph execution ID from the request
    mock_get_reviews_for_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
    )
    mock_get_reviews_for_execution.return_value = []  # No reviews found

    # Mock process_all_reviews to simulate not finding reviews
    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    # This should raise a ValueError with "Reviews not found" message based on the data/human_review.py logic
    mock_process_all_reviews.side_effect = ValueError(
        "Reviews not found or access denied for IDs: nonexistent_node"
    )

    request_data = {
        "reviews": [
            {
                "node_exec_id": "nonexistent_node",
                "approved": True,
                "message": "Test",
            }
        ]
    }

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 400
    assert "Reviews not found" in response.json()["detail"]


def test_process_review_action_partial_failure(
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    test_user_id: str,
) -> None:
    """Test handling of partial failures in review processing"""
    # Mock the route functions
    mock_get_reviews_for_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
    )
    mock_get_reviews_for_execution.return_value = [sample_pending_review]

    # Mock partial failure in processing
    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    mock_process_all_reviews.side_effect = ValueError("Some reviews failed validation")

    request_data = {
        "reviews": [
            {
                "node_exec_id": "test_node_123",
                "approved": True,
                "message": "Test",
            }
        ]
    }

    response = client.post("/api/review/action", json=request_data)

    assert response.status_code == 400
    assert "Some reviews failed validation" in response.json()["detail"]


def test_process_review_action_invalid_node_exec_id(
    mocker: pytest_mock.MockerFixture,
    sample_pending_review: PendingHumanReviewModel,
    test_user_id: str,
) -> None:
    """Test failure when trying to process review with invalid node execution ID"""
    # Mock the route functions
    mock_get_reviews_for_execution = mocker.patch(
        "backend.api.features.executions.review.routes.get_pending_reviews_for_execution"
    )
    mock_get_reviews_for_execution.return_value = [sample_pending_review]

    # Mock validation failure - this should return 400, not 500
    mock_process_all_reviews = mocker.patch(
        "backend.api.features.executions.review.routes.process_all_reviews_for_execution"
    )
    mock_process_all_reviews.side_effect = ValueError(
        "Invalid node execution ID format"
    )

    request_data = {
        "reviews": [
            {
                "node_exec_id": "invalid-node-format",
                "approved": True,
                "message": "Test",
            }
        ]
    }

    response = client.post("/api/review/action", json=request_data)

    # Should be a 400 Bad Request, not 500 Internal Server Error
    assert response.status_code == 400
    assert "Invalid node execution ID format" in response.json()["detail"]
@@ -0,0 +1,186 @@
import logging
from typing import List

import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, HTTPException, Query, Security, status
from prisma.enums import ReviewStatus

from backend.data.execution import get_graph_execution_meta
from backend.data.human_review import (
    get_pending_reviews_for_execution,
    get_pending_reviews_for_user,
    has_pending_reviews_for_graph_exec,
    process_all_reviews_for_execution,
)
from backend.executor.utils import add_graph_execution

from .model import PendingHumanReviewModel, ReviewRequest, ReviewResponse

logger = logging.getLogger(__name__)


router = APIRouter(
    tags=["v2", "executions", "review"],
    dependencies=[Security(autogpt_auth_lib.requires_user)],
)


@router.get(
    "/pending",
    summary="Get Pending Reviews",
    response_model=List[PendingHumanReviewModel],
    responses={
        200: {"description": "List of pending reviews"},
        500: {"description": "Server error", "content": {"application/json": {}}},
    },
)
async def list_pending_reviews(
    user_id: str = Security(autogpt_auth_lib.get_user_id),
    page: int = Query(1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(25, ge=1, le=100, description="Number of reviews per page"),
) -> List[PendingHumanReviewModel]:
    """Get all pending reviews for the current user.

    Retrieves all reviews with status "WAITING" that belong to the authenticated user.
    Results are ordered by creation time (newest first).

    Args:
        user_id: Authenticated user ID from security dependency

    Returns:
        List of pending review objects with status converted to typed literals

    Raises:
        HTTPException: If authentication fails or database error occurs

    Note:
        Reviews with invalid status values are logged as warnings but excluded
        from results rather than failing the entire request.
    """

    return await get_pending_reviews_for_user(user_id, page, page_size)


@router.get(
    "/execution/{graph_exec_id}",
    summary="Get Pending Reviews for Execution",
    response_model=List[PendingHumanReviewModel],
    responses={
        200: {"description": "List of pending reviews for the execution"},
        404: {"description": "Graph execution not found"},
        500: {"description": "Server error", "content": {"application/json": {}}},
    },
)
async def list_pending_reviews_for_execution(
    graph_exec_id: str,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> List[PendingHumanReviewModel]:
    """Get all pending reviews for a specific graph execution.

    Retrieves all reviews with status "WAITING" for the specified graph execution
    that belong to the authenticated user. Results are ordered by creation time
    (oldest first) to preserve review order within the execution.

    Args:
        graph_exec_id: ID of the graph execution to get reviews for
        user_id: Authenticated user ID from security dependency

    Returns:
        List of pending review objects for the specified execution

    Raises:
        HTTPException:
            - 404: If the graph execution doesn't exist or isn't owned by this user
            - 500: If authentication fails or database error occurs

    Note:
        Only returns reviews owned by the authenticated user for security.
        Reviews with invalid status are excluded with warning logs.
    """

    # Verify user owns the graph execution before returning reviews
    graph_exec = await get_graph_execution_meta(
        user_id=user_id, execution_id=graph_exec_id
    )
    if not graph_exec:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Graph execution #{graph_exec_id} not found",
        )

    return await get_pending_reviews_for_execution(graph_exec_id, user_id)


@router.post("/action", response_model=ReviewResponse)
async def process_review_action(
    request: ReviewRequest,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> ReviewResponse:
    """Process reviews with approve or reject actions."""

    # Collect all node exec IDs from the request
    all_request_node_ids = {review.node_exec_id for review in request.reviews}

    if not all_request_node_ids:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="At least one review must be provided",
        )

    # Build review decisions map
    review_decisions = {}
    for review in request.reviews:
        review_status = (
            ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
        )
        review_decisions[review.node_exec_id] = (
            review_status,
            review.reviewed_data,
            review.message,
        )

    # Process all reviews
    updated_reviews = await process_all_reviews_for_execution(
        user_id=user_id,
        review_decisions=review_decisions,
    )

    # Count results
    approved_count = sum(
        1
        for review in updated_reviews.values()
        if review.status == ReviewStatus.APPROVED
    )
    rejected_count = sum(
        1
        for review in updated_reviews.values()
        if review.status == ReviewStatus.REJECTED
    )

    # Resume execution if we processed some reviews
    if updated_reviews:
        # Get graph execution ID from any processed review
        first_review = next(iter(updated_reviews.values()))
        graph_exec_id = first_review.graph_exec_id

        # Check if any pending reviews remain for this execution
        still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id)

        if not still_has_pending:
            # Resume execution
            try:
                await add_graph_execution(
                    graph_id=first_review.graph_id,
                    user_id=user_id,
                    graph_exec_id=graph_exec_id,
                )
                logger.info(f"Resumed execution {graph_exec_id}")
            except Exception as e:
                logger.error(f"Failed to resume execution {graph_exec_id}: {str(e)}")

    return ReviewResponse(
        approved_count=approved_count,
        rejected_count=rejected_count,
        failed_count=0,
        error=None,
    )
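A hedged client-side sketch for the `/action` route above. The `/api/review` prefix mirrors how the test app mounts this router; the deployed prefix, the bearer-token auth scheme, and the `httpx` dependency are assumptions, not confirmed platform configuration.

```python
import httpx


def approve_reviews(base_url: str, token: str, node_exec_ids: list[str]) -> dict:
    """Approve every listed review in one call; the endpoint expects ALL
    pending reviews for the execution to be included in the request."""
    body = {
        "reviews": [
            {"node_exec_id": nid, "approved": True, "message": "LGTM"}
            for nid in node_exec_ids
        ]
    }
    resp = httpx.post(
        f"{base_url}/api/review/action",
        json=body,
        headers={"Authorization": f"Bearer {token}"},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()  # e.g. {"approved_count": 2, "rejected_count": 0, ...}
```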
@@ -1,7 +1,7 @@
import asyncio
import logging
from datetime import datetime, timedelta, timezone
from typing import TYPE_CHECKING, Annotated, Awaitable, List, Literal
from typing import TYPE_CHECKING, Annotated, List, Literal

from autogpt_libs.auth import get_user_id
from fastapi import (
@@ -17,9 +17,12 @@ from fastapi import (
from pydantic import BaseModel, Field, SecretStr
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_502_BAD_GATEWAY

from backend.data.graph import get_graph, set_node_webhook
from backend.api.features.library.db import set_preset_webhook, update_preset
from backend.api.features.library.model import LibraryAgentPreset
from backend.data.graph import NodeModel, get_graph, set_node_webhook
from backend.data.integrations import (
    WebhookEvent,
    WebhookWithRelations,
    get_all_webhooks_by_creds,
    get_webhook,
    publish_webhook_event,
@@ -32,7 +35,11 @@ from backend.data.model import (
    OAuth2Credentials,
    UserIntegrations,
)
from backend.data.onboarding import complete_webhook_trigger_step
from backend.data.onboarding import (
    OnboardingStep,
    complete_onboarding_step,
    increment_runs,
)
from backend.data.user import get_user_integrations
from backend.executor.utils import add_graph_execution
from backend.integrations.ayrshare import AyrshareClient, SocialPlatform
@@ -40,15 +47,16 @@ from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks import get_webhook_manager
from backend.server.integrations.models import (
    ProviderConstants,
    ProviderNamesResponse,
    get_all_provider_names,
from backend.util.exceptions import (
    GraphNotInLibraryError,
    MissingConfigError,
    NeedConfirmation,
    NotFoundError,
)
from backend.server.v2.library.db import set_preset_webhook, update_preset
from backend.util.exceptions import MissingConfigError, NeedConfirmation, NotFoundError
from backend.util.settings import Settings

from .models import ProviderConstants, ProviderNamesResponse, get_all_provider_names

if TYPE_CHECKING:
    from backend.integrations.oauth import BaseOAuthHandler

@@ -369,65 +377,24 @@ async def webhook_ingress_generic(
    if not (webhook.triggered_nodes or webhook.triggered_presets):
        return

    executions: list[Awaitable] = []
    await complete_webhook_trigger_step(user_id)
    await complete_onboarding_step(user_id, OnboardingStep.TRIGGER_WEBHOOK)
    await increment_runs(user_id)

    for node in webhook.triggered_nodes:
        logger.debug(f"Webhook-attached node: {node}")
        if not node.is_triggered_by_event_type(event_type):
            logger.debug(f"Node #{node.id} doesn't trigger on event {event_type}")
            continue
        logger.debug(f"Executing graph #{node.graph_id} node #{node.id}")
        executions.append(
            add_graph_execution(
                user_id=webhook.user_id,
                graph_id=node.graph_id,
                graph_version=node.graph_version,
                nodes_input_masks={node.id: {"payload": payload}},
            )
    # Execute all triggers concurrently for better performance
    tasks = []
    tasks.extend(
        _execute_webhook_node_trigger(node, webhook, webhook_id, event_type, payload)
        for node in webhook.triggered_nodes
    )
    tasks.extend(
        _execute_webhook_preset_trigger(
            preset, webhook, webhook_id, event_type, payload
        )
    for preset in webhook.triggered_presets:
        logger.debug(f"Webhook-attached preset: {preset}")
        if not preset.is_active:
            logger.debug(f"Preset #{preset.id} is inactive")
            continue
        for preset in webhook.triggered_presets
    )

        graph = await get_graph(preset.graph_id, preset.graph_version, webhook.user_id)
        if not graph:
            logger.error(
                f"User #{webhook.user_id} has preset #{preset.id} for graph "
                f"#{preset.graph_id} v{preset.graph_version}, "
                "but no access to the graph itself."
            )
            logger.info(f"Automatically deactivating broken preset #{preset.id}")
            await update_preset(preset.user_id, preset.id, is_active=False)
            continue
        if not (trigger_node := graph.webhook_input_node):
            # NOTE: this should NEVER happen, but we log and handle it gracefully
            logger.error(
                f"Preset #{preset.id} is triggered by webhook #{webhook.id}, but graph "
                f"#{preset.graph_id} v{preset.graph_version} has no webhook input node"
            )
            await set_preset_webhook(preset.user_id, preset.id, None)
            continue
        if not trigger_node.block.is_triggered_by_event_type(preset.inputs, event_type):
            logger.debug(f"Preset #{preset.id} doesn't trigger on event {event_type}")
            continue
        logger.debug(f"Executing preset #{preset.id} for webhook #{webhook.id}")

        executions.append(
            add_graph_execution(
                user_id=webhook.user_id,
                graph_id=preset.graph_id,
                preset_id=preset.id,
                graph_version=preset.graph_version,
                graph_credentials_inputs=preset.credentials,
                nodes_input_masks={
                    trigger_node.id: {**preset.inputs, "payload": payload}
                },
            )
        )
    asyncio.gather(*executions)
    if tasks:
        await asyncio.gather(*tasks, return_exceptions=True)


@router.post("/webhooks/{webhook_id}/ping")
@@ -456,6 +423,105 @@ async def webhook_ping(
    return True


async def _execute_webhook_node_trigger(
    node: NodeModel,
    webhook: WebhookWithRelations,
    webhook_id: str,
    event_type: str,
    payload: dict,
) -> None:
    """Execute a webhook-triggered node."""
    logger.debug(f"Webhook-attached node: {node}")
    if not node.is_triggered_by_event_type(event_type):
        logger.debug(f"Node #{node.id} doesn't trigger on event {event_type}")
        return
    logger.debug(f"Executing graph #{node.graph_id} node #{node.id}")
    try:
        await add_graph_execution(
            user_id=webhook.user_id,
            graph_id=node.graph_id,
            graph_version=node.graph_version,
            nodes_input_masks={node.id: {"payload": payload}},
        )
    except GraphNotInLibraryError as e:
        logger.warning(
            f"Webhook #{webhook_id} execution blocked for "
            f"deleted/archived graph #{node.graph_id} (node #{node.id}): {e}"
        )
        # Clean up orphaned webhook trigger for this graph
        await _cleanup_orphaned_webhook_for_graph(
            node.graph_id, webhook.user_id, webhook_id
        )
    except Exception:
        logger.exception(
            f"Failed to execute graph #{node.graph_id} via webhook #{webhook_id}"
        )
        # Continue processing - webhook should be resilient to individual failures


async def _execute_webhook_preset_trigger(
    preset: LibraryAgentPreset,
    webhook: WebhookWithRelations,
    webhook_id: str,
    event_type: str,
    payload: dict,
) -> None:
    """Execute a webhook-triggered preset."""
    logger.debug(f"Webhook-attached preset: {preset}")
    if not preset.is_active:
        logger.debug(f"Preset #{preset.id} is inactive")
        return

    graph = await get_graph(
        preset.graph_id, preset.graph_version, user_id=webhook.user_id
    )
    if not graph:
        logger.error(
            f"User #{webhook.user_id} has preset #{preset.id} for graph "
            f"#{preset.graph_id} v{preset.graph_version}, "
            "but no access to the graph itself."
        )
        logger.info(f"Automatically deactivating broken preset #{preset.id}")
        await update_preset(preset.user_id, preset.id, is_active=False)
        return
    if not (trigger_node := graph.webhook_input_node):
        # NOTE: this should NEVER happen, but we log and handle it gracefully
        logger.error(
            f"Preset #{preset.id} is triggered by webhook #{webhook.id}, but graph "
            f"#{preset.graph_id} v{preset.graph_version} has no webhook input node"
        )
        await set_preset_webhook(preset.user_id, preset.id, None)
        return
    if not trigger_node.block.is_triggered_by_event_type(preset.inputs, event_type):
        logger.debug(f"Preset #{preset.id} doesn't trigger on event {event_type}")
        return
    logger.debug(f"Executing preset #{preset.id} for webhook #{webhook.id}")

    try:
        await add_graph_execution(
            user_id=webhook.user_id,
            graph_id=preset.graph_id,
            preset_id=preset.id,
            graph_version=preset.graph_version,
            graph_credentials_inputs=preset.credentials,
            nodes_input_masks={trigger_node.id: {**preset.inputs, "payload": payload}},
        )
    except GraphNotInLibraryError as e:
        logger.warning(
            f"Webhook #{webhook_id} execution blocked for "
            f"deleted/archived graph #{preset.graph_id} (preset #{preset.id}): {e}"
        )
        # Clean up orphaned webhook trigger for this graph
        await _cleanup_orphaned_webhook_for_graph(
            preset.graph_id, webhook.user_id, webhook_id
        )
    except Exception:
        logger.exception(
            f"Failed to execute preset #{preset.id} via webhook #{webhook_id}"
        )
        # Continue processing - webhook should be resilient to individual failures
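The refactored ingress hunk above fans all node and preset triggers out concurrently, and `return_exceptions=True` is what keeps one failing trigger from cancelling its siblings. A minimal, self-contained illustration of that behavior:

```python
import asyncio


async def trigger(i: int) -> int:
    if i == 1:
        raise RuntimeError("one trigger failed")
    return i


async def main() -> None:
    # Exceptions come back as values in the result list instead of
    # propagating, so the remaining triggers still run to completion.
    results = await asyncio.gather(
        *(trigger(i) for i in range(3)), return_exceptions=True
    )
    print(results)  # [0, RuntimeError('one trigger failed'), 2]


asyncio.run(main())
```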
# --------------------------- UTILITIES ---------------------------- #


@@ -496,6 +562,98 @@ async def remove_all_webhooks_for_credentials(
        logger.warning(f"Webhook #{webhook.id} failed to prune")


async def _cleanup_orphaned_webhook_for_graph(
    graph_id: str, user_id: str, webhook_id: str
) -> None:
    """
    Clean up orphaned webhook connections for a specific graph when execution
    fails with GraphNotInLibraryError.
    This happens when an agent is pulled from the Marketplace or deleted
    but webhook triggers still exist.
    """
    try:
        webhook = await get_webhook(webhook_id, include_relations=True)
        if not webhook or webhook.user_id != user_id:
            logger.warning(
                f"Webhook {webhook_id} not found or doesn't belong to user {user_id}"
            )
            return

        nodes_removed = 0
        presets_removed = 0

        # Remove triggered nodes that belong to the deleted graph
        for node in webhook.triggered_nodes:
            if node.graph_id == graph_id:
                try:
                    await set_node_webhook(node.id, None)
                    nodes_removed += 1
                    logger.info(
                        f"Removed orphaned webhook trigger from node {node.id} "
                        f"in deleted/archived graph {graph_id}"
                    )
                except Exception:
                    logger.exception(
                        f"Failed to remove webhook trigger from node {node.id}"
                    )

        # Remove triggered presets that belong to the deleted graph
        for preset in webhook.triggered_presets:
            if preset.graph_id == graph_id:
                try:
                    await set_preset_webhook(user_id, preset.id, None)
                    presets_removed += 1
                    logger.info(
                        f"Removed orphaned webhook trigger from preset {preset.id} "
                        f"for deleted/archived graph {graph_id}"
                    )
                except Exception:
                    logger.exception(
                        f"Failed to remove webhook trigger from preset {preset.id}"
                    )

        if nodes_removed > 0 or presets_removed > 0:
            logger.info(
                f"Cleaned up orphaned webhook #{webhook_id}: "
                f"removed {nodes_removed} nodes and {presets_removed} presets "
                f"for deleted/archived graph #{graph_id}"
            )

        # Check if webhook has any remaining triggers, if not, prune it
        updated_webhook = await get_webhook(webhook_id, include_relations=True)
        if (
            not updated_webhook.triggered_nodes
            and not updated_webhook.triggered_presets
        ):
            try:
                webhook_manager = get_webhook_manager(
                    ProviderName(webhook.provider)
                )
                credentials = (
                    await creds_manager.get(user_id, webhook.credentials_id)
                    if webhook.credentials_id
                    else None
                )
                success = await webhook_manager.prune_webhook_if_dangling(
                    user_id, webhook.id, credentials
                )
                if success:
                    logger.info(
                        f"Pruned orphaned webhook #{webhook_id} "
                        f"with no remaining triggers"
                    )
                else:
                    logger.warning(
                        f"Failed to prune orphaned webhook #{webhook_id}"
                    )
            except Exception:
                logger.exception(f"Failed to prune orphaned webhook #{webhook_id}")

    except Exception:
        logger.exception(
            f"Failed to cleanup orphaned webhook #{webhook_id} for graph #{graph_id}"
        )


def _get_provider_oauth_handler(
    req: Request, provider_name: ProviderName
) -> "BaseOAuthHandler":
@@ -4,27 +4,30 @@ from typing import Literal, Optional

import fastapi
import prisma.errors
import prisma.fields
import prisma.models
import prisma.types

import backend.api.features.store.exceptions as store_exceptions
import backend.api.features.store.image_gen as store_image_gen
import backend.api.features.store.media as store_media
import backend.data.graph as graph_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.exceptions as store_exceptions
import backend.server.v2.store.image_gen as store_image_gen
import backend.server.v2.store.media as store_media
import backend.data.integrations as integrations_db
from backend.data.block import BlockInput
from backend.data.db import transaction
from backend.data.execution import get_graph_execution
from backend.data.graph import GraphSettings
from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
from backend.util.clients import get_scheduler_client
from backend.util.exceptions import DatabaseError, NotFoundError
from backend.util.json import SafeJson
from backend.util.models import Pagination
from backend.util.settings import Config

from . import model as library_model

logger = logging.getLogger(__name__)
config = Config()
integration_creds_manager = IntegrationCredentialsManager()
@@ -260,6 +263,30 @@ async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent
        if not library_agent:
            raise NotFoundError(f"Library agent #{id} not found")

        # Fetch marketplace listing if the agent has been published
        store_listing = None
        profile = None
        if library_agent.AgentGraph:
            store_listing = await prisma.models.StoreListing.prisma().find_first(
                where={
                    "agentGraphId": library_agent.AgentGraph.id,
                    "isDeleted": False,
                    "hasApprovedVersion": True,
                },
                include={
                    "ActiveVersion": True,
                },
            )
            if (
                store_listing
                and store_listing.ActiveVersion
                and store_listing.owningUserId
            ):
                # Fetch Profile separately since User doesn't have a direct Profile relation
                profile = await prisma.models.Profile.prisma().find_first(
                    where={"userId": store_listing.owningUserId}
                )

        return library_model.LibraryAgent.from_db(
            library_agent,
            sub_graphs=(
@@ -267,6 +294,8 @@ async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent
                if library_agent.AgentGraph
                else None
            ),
            store_listing=store_listing,
            profile=profile,
        )

    except prisma.errors.PrismaError as e:
@@ -372,6 +401,24 @@ async def add_generated_agent_image(
    )


def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings:
    """
    Initialize GraphSettings based on graph content.

    Args:
        graph: The graph to analyze

    Returns:
        GraphSettings with appropriate human_in_the_loop_safe_mode value
    """
    if graph.has_human_in_the_loop:
        # Graph has HITL blocks - set safe mode to True by default
        return GraphSettings(human_in_the_loop_safe_mode=True)
    else:
        # Graph has no HITL blocks - keep None
        return GraphSettings(human_in_the_loop_safe_mode=None)
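A minimal illustration of `_initialize_graph_settings` above; `GraphModel` construction is elided and a hypothetical duck-typed stub carrying only the `has_human_in_the_loop` flag stands in:

```python
class _GraphStub:
    """Hypothetical stand-in for GraphModel; only the flag the helper reads."""

    def __init__(self, has_hitl: bool) -> None:
        self.has_human_in_the_loop = has_hitl


# HITL blocks present: safe mode defaults to True
assert _initialize_graph_settings(_GraphStub(True)).human_in_the_loop_safe_mode is True  # type: ignore[arg-type]
# No HITL blocks: the setting stays unset (None)
assert _initialize_graph_settings(_GraphStub(False)).human_in_the_loop_safe_mode is None  # type: ignore[arg-type]
```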
async def create_library_agent(
|
||||
graph: graph_db.GraphModel,
|
||||
user_id: str,
|
||||
@@ -394,8 +441,7 @@ async def create_library_agent(
|
||||
DatabaseError: If there's an error during creation or if image generation fails.
|
||||
"""
|
||||
logger.info(
|
||||
f"Creating library agent for graph #{graph.id} v{graph.version}; "
|
||||
f"user #{user_id}"
|
||||
f"Creating library agent for graph #{graph.id} v{graph.version}; user:<redacted>"
|
||||
)
|
||||
graph_entries = (
|
||||
[graph, *graph.sub_graphs] if create_library_agents_for_sub_graphs else [graph]
|
||||
@@ -418,6 +464,9 @@ async def create_library_agent(
|
||||
}
|
||||
}
|
||||
},
|
||||
settings=SafeJson(
|
||||
_initialize_graph_settings(graph_entry).model_dump()
|
||||
),
|
||||
),
|
||||
include=library_agent_include(
|
||||
user_id, include_nodes=False, include_executions=False
|
||||
@@ -438,7 +487,7 @@ async def update_agent_version_in_library(
|
||||
    user_id: str,
    agent_graph_id: str,
    agent_graph_version: int,
) -> None:
) -> library_model.LibraryAgent:
    """
    Updates the agent version in the library if useGraphIsActiveVersion is True.

@@ -462,7 +511,7 @@ async def update_agent_version_in_library(
                "useGraphIsActiveVersion": True,
            },
        )
        await prisma.models.LibraryAgent.prisma().update(
        lib = await prisma.models.LibraryAgent.prisma().update(
            where={"id": library_agent.id},
            data={
                "AgentGraph": {
@@ -474,7 +523,12 @@ async def update_agent_version_in_library(
                },
            },
            },
            include={"AgentGraph": True},
        )
        if lib is None:
            raise NotFoundError(f"Library agent {library_agent.id} not found")

        return library_model.LibraryAgent.from_db(lib)
    except prisma.errors.PrismaError as e:
        logger.error(f"Database error updating agent version in library: {e}")
        raise DatabaseError("Failed to update agent version in library") from e
@@ -484,9 +538,11 @@ async def update_library_agent(
    library_agent_id: str,
    user_id: str,
    auto_update_version: Optional[bool] = None,
    graph_version: Optional[int] = None,
    is_favorite: Optional[bool] = None,
    is_archived: Optional[bool] = None,
    is_deleted: Optional[Literal[False]] = None,
    settings: Optional[GraphSettings] = None,
) -> library_model.LibraryAgent:
    """
    Updates the specified LibraryAgent record.
@@ -495,8 +551,10 @@ async def update_library_agent(
        library_agent_id: The ID of the LibraryAgent to update.
        user_id: The owner of this LibraryAgent.
        auto_update_version: Whether the agent should auto-update to active version.
        graph_version: Specific graph version to update to.
        is_favorite: Whether this agent is marked as a favorite.
        is_archived: Whether this agent is archived.
        settings: User-specific settings for this library agent.

    Returns:
        The updated LibraryAgent.
@@ -507,8 +565,8 @@ async def update_library_agent(
    """
    logger.debug(
        f"Updating library agent {library_agent_id} for user {user_id} with "
        f"auto_update_version={auto_update_version}, is_favorite={is_favorite}, "
        f"is_archived={is_archived}"
        f"auto_update_version={auto_update_version}, graph_version={graph_version}, "
        f"is_favorite={is_favorite}, is_archived={is_archived}, settings={settings}"
    )
    update_fields: prisma.types.LibraryAgentUpdateManyMutationInput = {}
    if auto_update_version is not None:
@@ -523,10 +581,25 @@ async def update_library_agent(
            "Use delete_library_agent() to (soft-)delete library agents"
        )
        update_fields["isDeleted"] = is_deleted
    if not update_fields:
        raise ValueError("No values were passed to update")
    if settings is not None:
        update_fields["settings"] = SafeJson(settings.model_dump())

    try:
        # If graph_version is provided, update to that specific version
        if graph_version is not None:
            # Get the current agent to find its graph_id
            agent = await get_library_agent(id=library_agent_id, user_id=user_id)
            # Update to the specified version using existing function
            return await update_agent_version_in_library(
                user_id=user_id,
                agent_graph_id=agent.graph_id,
                agent_graph_version=graph_version,
            )

        # Otherwise, just update the simple fields
        if not update_fields:
            raise ValueError("No values were passed to update")

        n_updated = await prisma.models.LibraryAgent.prisma().update_many(
            where={"id": library_agent_id, "userId": user_id},
            data=update_fields,
@@ -543,21 +616,118 @@ async def update_library_agent(
        raise DatabaseError("Failed to update library agent") from e


async def update_library_agent_settings(
    user_id: str,
    agent_id: str,
    settings: GraphSettings,
) -> library_model.LibraryAgent:
    """
    Updates the settings for a specific LibraryAgent.

    Args:
        user_id: The owner of the LibraryAgent.
        agent_id: The ID of the LibraryAgent to update.
        settings: New GraphSettings to apply.

    Returns:
        The updated LibraryAgent.

    Raises:
        NotFoundError: If the specified LibraryAgent does not exist.
        DatabaseError: If there's an error in the update operation.
    """
    return await update_library_agent(
        library_agent_id=agent_id,
        user_id=user_id,
        settings=settings,
    )
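The new `graph_version` parameter gives `update_library_agent` a second mode: rather than patching flags, it can pin the agent to a specific graph version by delegating to `update_agent_version_in_library`. A minimal usage sketch (the library agent ID is a placeholder):

```python
# Hedged sketch: pin a library agent to graph version 3, then favorite it.
# "lib-agent-123" is a hypothetical ID; both calls go through the
# update_library_agent() shown above.
await update_library_agent(
    library_agent_id="lib-agent-123",
    user_id=user_id,
    graph_version=3,  # delegates to update_agent_version_in_library()
)
await update_library_agent(
    library_agent_id="lib-agent-123",
    user_id=user_id,
    is_favorite=True,  # simple-field path via update_many()
)
```

Note that, as written, a call passing `graph_version` returns before the simple-field update runs, so other fields should be sent in a separate call.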
async def delete_library_agent(
    library_agent_id: str, user_id: str, soft_delete: bool = True
) -> None:
    # First get the agent to find the graph_id for cleanup
    library_agent = await prisma.models.LibraryAgent.prisma().find_unique(
        where={"id": library_agent_id}, include={"AgentGraph": True}
    )

    if not library_agent or library_agent.userId != user_id:
        raise NotFoundError(f"Library agent #{library_agent_id} not found")

    graph_id = library_agent.agentGraphId

    # Clean up associated schedules and webhooks BEFORE deleting the agent
    # This prevents executions from starting after agent deletion
    await _cleanup_schedules_for_graph(graph_id=graph_id, user_id=user_id)
    await _cleanup_webhooks_for_graph(graph_id=graph_id, user_id=user_id)

    # Delete the library agent after cleanup
    if soft_delete:
        deleted_count = await prisma.models.LibraryAgent.prisma().update_many(
            where={"id": library_agent_id, "userId": user_id}, data={"isDeleted": True}
            where={"id": library_agent_id, "userId": user_id},
            data={"isDeleted": True},
        )
    else:
        deleted_count = await prisma.models.LibraryAgent.prisma().delete_many(
            where={"id": library_agent_id, "userId": user_id}
        )

    if deleted_count < 1:
        raise NotFoundError(f"Library agent #{library_agent_id} not found")


async def _cleanup_schedules_for_graph(graph_id: str, user_id: str) -> None:
    """
    Clean up all schedules for a specific graph and user.

    Args:
        graph_id: The ID of the graph
        user_id: The ID of the user
    """
    scheduler_client = get_scheduler_client()
    schedules = await scheduler_client.get_execution_schedules(
        graph_id=graph_id, user_id=user_id
    )

    for schedule in schedules:
        try:
            await scheduler_client.delete_schedule(
                schedule_id=schedule.id, user_id=user_id
            )
            logger.info(f"Deleted schedule {schedule.id} for graph {graph_id}")
        except Exception:
            logger.exception(
                f"Failed to delete schedule {schedule.id} for graph {graph_id}"
            )


async def _cleanup_webhooks_for_graph(graph_id: str, user_id: str) -> None:
    """
    Clean up webhook connections for a specific graph and user.
    Unlinks webhooks from this graph and deletes them if no other triggers remain.

    Args:
        graph_id: The ID of the graph
        user_id: The ID of the user
    """
    # Find all webhooks that trigger nodes in this graph
    webhooks = await integrations_db.find_webhooks_by_graph_id(
        graph_id=graph_id, user_id=user_id
    )

    for webhook in webhooks:
        try:
            # Unlink webhook from this graph's nodes and presets
            await integrations_db.unlink_webhook_from_graph(
                webhook_id=webhook.id, graph_id=graph_id, user_id=user_id
            )
            logger.info(f"Unlinked webhook {webhook.id} from graph {graph_id}")
        except Exception:
            logger.exception(
                f"Failed to unlink webhook {webhook.id} from graph {graph_id}"
            )


async def delete_library_agent_by_graph_id(graph_id: str, user_id: str) -> None:
    """
    Deletes a library agent for the given user
@@ -609,6 +779,18 @@ async def add_store_agent_to_library(

    graph = store_listing_version.AgentGraph

    # Convert to GraphModel to check for HITL blocks
    graph_model = await graph_db.get_graph(
        graph_id=graph.id,
        version=graph.version,
        user_id=user_id,
        include_subgraphs=False,
    )
    if not graph_model:
        raise store_exceptions.AgentNotFoundError(
            f"Graph #{graph.id} v{graph.version} not found or accessible"
        )

    # Check if user already has this agent
    existing_library_agent = await prisma.models.LibraryAgent.prisma().find_unique(
        where={
@@ -643,6 +825,9 @@ async def add_store_agent_to_library(
            }
        },
        "isCreatedByUser": False,
        "settings": SafeJson(
            _initialize_graph_settings(graph_model).model_dump()
        ),
    },
    include=library_agent_include(
        user_id, include_nodes=False, include_executions=False
@@ -1,16 +1,15 @@
from datetime import datetime

import prisma.enums
import prisma.errors
import prisma.models
import prisma.types
import pytest

import backend.server.v2.library.db as db
import backend.server.v2.store.exceptions
import backend.api.features.store.exceptions
from backend.data.db import connect
from backend.data.includes import library_agent_include

from . import db


@pytest.mark.asyncio
async def test_get_library_agents(mocker):
@@ -32,6 +31,7 @@ async def test_get_library_agents(mocker):
        id="ua1",
        userId="test-user",
        agentGraphId="agent2",
        settings="{}", # type: ignore
        agentGraphVersion=1,
        isCreatedByUser=False,
        isDeleted=False,
@@ -87,7 +87,7 @@ async def test_add_agent_to_library(mocker):
    await connect()

    # Mock the transaction context
    mock_transaction = mocker.patch("backend.server.v2.library.db.transaction")
    mock_transaction = mocker.patch("backend.api.features.library.db.transaction")
    mock_transaction.return_value.__aenter__ = mocker.AsyncMock(return_value=None)
    mock_transaction.return_value.__aexit__ = mocker.AsyncMock(return_value=None)
    # Mock data
@@ -123,6 +123,7 @@ async def test_add_agent_to_library(mocker):
        id="ua1",
        userId="test-user",
        agentGraphId=mock_store_listing_data.agentGraphId,
        settings="{}", # type: ignore
        agentGraphVersion=1,
        isCreatedByUser=False,
        isDeleted=False,
@@ -148,8 +149,18 @@ async def test_add_agent_to_library(mocker):
        return_value=mock_library_agent_data
    )

    # Mock graph_db.get_graph function that's called to check for HITL blocks
    mock_graph_db = mocker.patch("backend.api.features.library.db.graph_db")
    mock_graph_model = mocker.Mock()
    mock_graph_model.nodes = (
        []
    )  # Empty list so _has_human_in_the_loop_blocks returns False
    mock_graph_db.get_graph = mocker.AsyncMock(return_value=mock_graph_model)

    # Mock the model conversion
    mock_from_db = mocker.patch("backend.server.v2.library.model.LibraryAgent.from_db")
    mock_from_db = mocker.patch(
        "backend.api.features.library.model.LibraryAgent.from_db"
    )
    mock_from_db.return_value = mocker.Mock()

    # Call function
@@ -169,17 +180,29 @@ async def test_add_agent_to_library(mocker):
        },
        include={"AgentGraph": True},
    )
    mock_library_agent.return_value.create.assert_called_once_with(
        data={
            "User": {"connect": {"id": "test-user"}},
            "AgentGraph": {
                "connect": {"graphVersionId": {"id": "agent1", "version": 1}}
            },
            "isCreatedByUser": False,
        },
        include=library_agent_include(
            "test-user", include_nodes=False, include_executions=False
        ),
    # Check that create was called with the expected data including settings
    create_call_args = mock_library_agent.return_value.create.call_args
    assert create_call_args is not None

    # Verify the main structure
    expected_data = {
        "User": {"connect": {"id": "test-user"}},
        "AgentGraph": {"connect": {"graphVersionId": {"id": "agent1", "version": 1}}},
        "isCreatedByUser": False,
    }

    actual_data = create_call_args[1]["data"]
    # Check that all expected fields are present
    for key, value in expected_data.items():
        assert actual_data[key] == value

    # Check that settings field is present and is a SafeJson object
    assert "settings" in actual_data
    assert hasattr(actual_data["settings"], "__class__")  # Should be a SafeJson object

    # Check include parameter
    assert create_call_args[1]["include"] == library_agent_include(
        "test-user", include_nodes=False, include_executions=False
    )


@@ -195,7 +218,7 @@ async def test_add_agent_to_library_not_found(mocker):
    )

    # Call function and verify exception
    with pytest.raises(backend.server.v2.store.exceptions.AgentNotFoundError):
    with pytest.raises(backend.api.features.store.exceptions.AgentNotFoundError):
        await db.add_store_agent_to_library("version123", "test-user")

    # Verify mock called correctly
@@ -6,8 +6,8 @@ import prisma.enums
import prisma.models
import pydantic

import backend.data.block as block_model
import backend.data.graph as graph_model
from backend.data.block import BlockInput
from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo
from backend.data.model import CredentialsMetaInput, is_credentials_field_name
from backend.util.models import Pagination

@@ -22,6 +22,23 @@ class LibraryAgentStatus(str, Enum):
    ERROR = "ERROR"  # Agent is in an error state


class MarketplaceListingCreator(pydantic.BaseModel):
    """Creator information for a marketplace listing."""

    name: str
    id: str
    slug: str


class MarketplaceListing(pydantic.BaseModel):
    """Marketplace listing information for a library agent."""

    id: str
    name: str
    slug: str
    creator: MarketplaceListingCreator


class LibraryAgent(pydantic.BaseModel):
    """
    Represents an agent in the library, including metadata for display and
@@ -39,6 +56,7 @@ class LibraryAgent(pydantic.BaseModel):

    status: LibraryAgentStatus

    created_at: datetime.datetime
    updated_at: datetime.datetime

    name: str
@@ -54,7 +72,7 @@ class LibraryAgent(pydantic.BaseModel):
    has_external_trigger: bool = pydantic.Field(
        description="Whether the agent has an external trigger (e.g. webhook) node"
    )
    trigger_setup_info: Optional[graph_model.GraphTriggerInfo] = None
    trigger_setup_info: Optional[GraphTriggerInfo] = None

    # Indicates whether there's a new output (based on recent runs)
    new_output: bool
@@ -71,10 +89,18 @@ class LibraryAgent(pydantic.BaseModel):
    # Recommended schedule cron (from marketplace agents)
    recommended_schedule_cron: str | None = None

    # User-specific settings for this library agent
    settings: GraphSettings = pydantic.Field(default_factory=GraphSettings)

    # Marketplace listing information if the agent has been published
    marketplace_listing: Optional["MarketplaceListing"] = None

    @staticmethod
    def from_db(
        agent: prisma.models.LibraryAgent,
        sub_graphs: Optional[list[prisma.models.AgentGraph]] = None,
        store_listing: Optional[prisma.models.StoreListing] = None,
        profile: Optional[prisma.models.Profile] = None,
    ) -> "LibraryAgent":
        """
        Factory method that constructs a LibraryAgent from a Prisma LibraryAgent
@@ -83,7 +109,9 @@ class LibraryAgent(pydantic.BaseModel):
        if not agent.AgentGraph:
            raise ValueError("Associated Agent record is required.")

        graph = graph_model.GraphModel.from_db(agent.AgentGraph, sub_graphs=sub_graphs)
        graph = GraphModel.from_db(agent.AgentGraph, sub_graphs=sub_graphs)

        created_at = agent.createdAt

        agent_updated_at = agent.AgentGraph.updatedAt
        lib_agent_updated_at = agent.updatedAt
@@ -116,6 +144,21 @@ class LibraryAgent(pydantic.BaseModel):
        # Hard-coded to True until a method to check is implemented
        is_latest_version = True

        # Build marketplace_listing if available
        marketplace_listing_data = None
        if store_listing and store_listing.ActiveVersion and profile:
            creator_data = MarketplaceListingCreator(
                name=profile.name,
                id=profile.id,
                slug=profile.username,
            )
            marketplace_listing_data = MarketplaceListing(
                id=store_listing.id,
                name=store_listing.ActiveVersion.name,
                slug=store_listing.slug,
                creator=creator_data,
            )

        return LibraryAgent(
            id=agent.id,
            graph_id=agent.agentGraphId,
@@ -124,6 +167,7 @@ class LibraryAgent(pydantic.BaseModel):
            creator_name=creator_name,
            creator_image_url=creator_image_url,
            status=status,
            created_at=created_at,
            updated_at=updated_at,
            name=graph.name,
            description=graph.description,
@@ -140,6 +184,8 @@ class LibraryAgent(pydantic.BaseModel):
            is_latest_version=is_latest_version,
            is_favorite=agent.isFavorite,
            recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron,
            settings=GraphSettings.model_validate(agent.settings),
            marketplace_listing=marketplace_listing_data,
        )


@@ -207,7 +253,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel):
    graph_id: str
    graph_version: int

    inputs: block_model.BlockInput
    inputs: BlockInput
    credentials: dict[str, CredentialsMetaInput]

    name: str
@@ -236,7 +282,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel):
    Request model used when updating a preset for a library agent.
    """

    inputs: Optional[block_model.BlockInput] = None
    inputs: Optional[BlockInput] = None
    credentials: Optional[dict[str, CredentialsMetaInput]] = None

    name: Optional[str] = None
@@ -279,7 +325,7 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):
                "Webhook must be included in AgentPreset query when webhookId is set"
            )

        input_data: block_model.BlockInput = {}
        input_data: BlockInput = {}
        input_credentials: dict[str, CredentialsMetaInput] = {}

        for preset_input in preset.InputPresets:
@@ -339,9 +385,15 @@ class LibraryAgentUpdateRequest(pydantic.BaseModel):
    auto_update_version: Optional[bool] = pydantic.Field(
        default=None, description="Auto-update the agent version"
    )
    graph_version: Optional[int] = pydantic.Field(
        default=None, description="Specific graph version to update to"
    )
    is_favorite: Optional[bool] = pydantic.Field(
        default=None, description="Mark the agent as a favorite"
    )
    is_archived: Optional[bool] = pydantic.Field(
        default=None, description="Archive the agent"
    )
    settings: Optional[GraphSettings] = pydantic.Field(
        default=None, description="User-specific settings for this library agent"
    )
@@ -3,7 +3,7 @@ import datetime
import prisma.models
import pytest

import backend.server.v2.library.model as library_model
from . import model as library_model


@pytest.mark.asyncio
@@ -1,15 +1,18 @@
import logging
from typing import Optional
from typing import Literal, Optional

import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, Body, HTTPException, Query, Security, status
from fastapi.responses import Response
from prisma.enums import OnboardingStep

import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.exceptions as store_exceptions
import backend.api.features.store.exceptions as store_exceptions
from backend.data.onboarding import complete_onboarding_step
from backend.util.exceptions import DatabaseError, NotFoundError

from .. import db as library_db
from .. import model as library_model

logger = logging.getLogger(__name__)

router = APIRouter(
@@ -22,7 +25,9 @@ router = APIRouter(
@router.get(
    "",
    summary="List Library Agents",
    response_model=library_model.LibraryAgentResponse,
    responses={
        200: {"description": "List of library agents"},
        500: {"description": "Server error", "content": {"application/json": {}}},
    },
)
@@ -155,7 +160,12 @@ async def get_library_agent_by_graph_id(
@router.get(
    "/marketplace/{store_listing_version_id}",
    summary="Get Agent By Store ID",
    tags=["store, library"],
    tags=["store", "library"],
    response_model=library_model.LibraryAgent | None,
    responses={
        200: {"description": "Library agent found"},
        404: {"description": "Agent not found"},
    },
)
async def get_library_agent_by_store_listing_version_id(
    store_listing_version_id: str,
@@ -193,6 +203,9 @@ async def get_library_agent_by_store_listing_version_id(
)
async def add_marketplace_agent_to_library(
    store_listing_version_id: str = Body(embed=True),
    source: Literal["onboarding", "marketplace"] = Body(
        default="marketplace", embed=True
    ),
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryAgent:
    """
@@ -210,10 +223,15 @@ async def add_marketplace_agent_to_library(
        HTTPException(500): If a server/database error occurs.
    """
    try:
        return await library_db.add_store_agent_to_library(
        agent = await library_db.add_store_agent_to_library(
            store_listing_version_id=store_listing_version_id,
            user_id=user_id,
        )
        if source != "onboarding":
            await complete_onboarding_step(
                user_id, OnboardingStep.MARKETPLACE_ADD_AGENT
            )
        return agent

    except store_exceptions.AgentNotFoundError as e:
        logger.warning(
@@ -267,8 +285,10 @@ async def update_library_agent(
        library_agent_id=library_agent_id,
        user_id=user_id,
        auto_update_version=payload.auto_update_version,
        graph_version=payload.graph_version,
        is_favorite=payload.is_favorite,
        is_archived=payload.is_archived,
        settings=payload.settings,
    )
    except NotFoundError as e:
        raise HTTPException(
@@ -4,18 +4,20 @@ from typing import Any, Optional
import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, Body, HTTPException, Query, Security, status

import backend.server.v2.library.db as db
import backend.server.v2.library.model as models
from backend.data.execution import GraphExecutionMeta
from backend.data.graph import get_graph
from backend.data.integrations import get_webhook
from backend.data.model import CredentialsMetaInput
from backend.data.onboarding import increment_runs
from backend.executor.utils import add_graph_execution, make_node_credentials_input_map
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.webhooks import get_webhook_manager
from backend.integrations.webhooks.utils import setup_webhook_for_block
from backend.util.exceptions import NotFoundError

from .. import db
from .. import model as models

logger = logging.getLogger(__name__)

credentials_manager = IntegrationCredentialsManager()
@@ -401,6 +403,8 @@ async def execute_preset(
    merged_node_input = preset.inputs | inputs
    merged_credential_inputs = preset.credentials | credential_inputs

    await increment_runs(user_id)

    return await add_graph_execution(
        user_id=user_id,
        graph_id=preset.graph_id,
@@ -1,15 +1,17 @@
import datetime
import json
from unittest.mock import AsyncMock

import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot

import backend.server.v2.library.model as library_model
from backend.server.v2.library.routes import router as library_router
from backend.util.models import Pagination

from . import model as library_model
from .routes import router as library_router

app = fastapi.FastAPI()
app.include_router(library_router)

@@ -55,6 +57,7 @@ async def test_get_library_agents_success(
            can_access_graph=True,
            is_latest_version=True,
            is_favorite=False,
            created_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
            updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
        ),
        library_model.LibraryAgent(
@@ -76,6 +79,7 @@ async def test_get_library_agents_success(
            can_access_graph=False,
            is_latest_version=True,
            is_favorite=False,
            created_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
            updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
        ),
    ],
@@ -83,7 +87,7 @@ async def test_get_library_agents_success(
            total_items=2, total_pages=1, current_page=1, page_size=50
        ),
    )
    mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
    mock_db_call = mocker.patch("backend.api.features.library.db.list_library_agents")
    mock_db_call.return_value = mocked_value

    response = client.get("/agents?search_term=test")
@@ -109,7 +113,7 @@ async def test_get_library_agents_success(


def test_get_library_agents_error(mocker: pytest_mock.MockFixture, test_user_id: str):
    mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
    mock_db_call = mocker.patch("backend.api.features.library.db.list_library_agents")
    mock_db_call.side_effect = Exception("Test error")

    response = client.get("/agents?search_term=test")
@@ -149,6 +153,7 @@ async def test_get_favorite_library_agents_success(
            can_access_graph=True,
            is_latest_version=True,
            is_favorite=True,
            created_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
            updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
        ),
    ],
@@ -157,7 +162,7 @@ async def test_get_favorite_library_agents_success(
        ),
    )
    mock_db_call = mocker.patch(
        "backend.server.v2.library.db.list_favorite_library_agents"
        "backend.api.features.library.db.list_favorite_library_agents"
    )
    mock_db_call.return_value = mocked_value

@@ -180,7 +185,7 @@ def test_get_favorite_library_agents_error(
    mocker: pytest_mock.MockFixture, test_user_id: str
):
    mock_db_call = mocker.patch(
        "backend.server.v2.library.db.list_favorite_library_agents"
        "backend.api.features.library.db.list_favorite_library_agents"
    )
    mock_db_call.side_effect = Exception("Test error")

@@ -214,13 +219,18 @@ def test_add_agent_to_library_success(
        can_access_graph=True,
        is_latest_version=True,
        is_favorite=False,
        created_at=FIXED_NOW,
        updated_at=FIXED_NOW,
    )

    mock_db_call = mocker.patch(
        "backend.server.v2.library.db.add_store_agent_to_library"
        "backend.api.features.library.db.add_store_agent_to_library"
    )
    mock_db_call.return_value = mock_library_agent
    mock_complete_onboarding = mocker.patch(
        "backend.api.features.library.routes.agents.complete_onboarding_step",
        new_callable=AsyncMock,
    )

    response = client.post(
        "/agents", json={"store_listing_version_id": "test-version-id"}
@@ -235,11 +245,12 @@ def test_add_agent_to_library_success(
    mock_db_call.assert_called_once_with(
        store_listing_version_id="test-version-id", user_id=test_user_id
    )
    mock_complete_onboarding.assert_awaited_once()


def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture, test_user_id: str):
    mock_db_call = mocker.patch(
        "backend.server.v2.library.db.add_store_agent_to_library"
        "backend.api.features.library.db.add_store_agent_to_library"
    )
    mock_db_call.side_effect = Exception("Test error")
833
autogpt_platform/backend/backend/api/features/oauth.py
Normal file
@@ -0,0 +1,833 @@
"""
OAuth 2.0 Provider Endpoints

Implements OAuth 2.0 Authorization Code flow with PKCE support.

Flow:
1. User clicks "Login with AutoGPT" in 3rd party app
2. App redirects user to /auth/authorize with client_id, redirect_uri, scope, state
3. User sees consent screen (if not already logged in, redirects to login first)
4. User approves → backend creates authorization code
5. User redirected back to app with code
6. App exchanges code for access/refresh tokens at /api/oauth/token
7. App uses access token to call external API endpoints
"""
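Steps 2 and 6 of this flow hinge on PKCE. For reference, a minimal client-side sketch of generating the `code_verifier`/`code_challenge` pair with only the standard library (the S256 transform follows RFC 7636; this is not part of the file below):

```python
import base64
import hashlib
import secrets

# The client keeps code_verifier secret, sends only the derived
# code_challenge in the /authorize request (step 2), and reveals the
# verifier at the token endpoint (step 6) to prove possession.
code_verifier = secrets.token_urlsafe(64)  # URL-safe randomness, 43-128 chars
code_challenge = (
    base64.urlsafe_b64encode(hashlib.sha256(code_verifier.encode()).digest())
    .rstrip(b"=")  # base64url without padding, as S256 requires
    .decode()
)
```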
import io
import logging
import os
import uuid
from datetime import datetime
from typing import Literal, Optional
from urllib.parse import urlencode

from autogpt_libs.auth import get_user_id
from fastapi import APIRouter, Body, HTTPException, Security, UploadFile, status
from gcloud.aio import storage as async_storage
from PIL import Image
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field

from backend.data.auth.oauth import (
    InvalidClientError,
    InvalidGrantError,
    OAuthApplicationInfo,
    TokenIntrospectionResult,
    consume_authorization_code,
    create_access_token,
    create_authorization_code,
    create_refresh_token,
    get_oauth_application,
    get_oauth_application_by_id,
    introspect_token,
    list_user_oauth_applications,
    refresh_tokens,
    revoke_access_token,
    revoke_refresh_token,
    update_oauth_application,
    validate_client_credentials,
    validate_redirect_uri,
    validate_scopes,
)
from backend.util.settings import Settings
from backend.util.virus_scanner import scan_content_safe

settings = Settings()
logger = logging.getLogger(__name__)

router = APIRouter()


# ============================================================================
# Request/Response Models
# ============================================================================


class TokenResponse(BaseModel):
    """OAuth 2.0 token response"""

    token_type: Literal["Bearer"] = "Bearer"
    access_token: str
    access_token_expires_at: datetime
    refresh_token: str
    refresh_token_expires_at: datetime
    scopes: list[str]


class ErrorResponse(BaseModel):
    """OAuth 2.0 error response"""

    error: str
    error_description: Optional[str] = None


class OAuthApplicationPublicInfo(BaseModel):
    """Public information about an OAuth application (for consent screen)"""

    name: str
    description: Optional[str] = None
    logo_url: Optional[str] = None
    scopes: list[str]


# ============================================================================
# Application Info Endpoint
# ============================================================================


@router.get(
    "/app/{client_id}",
    responses={
        404: {"description": "Application not found or disabled"},
    },
)
async def get_oauth_app_info(
    client_id: str, user_id: str = Security(get_user_id)
) -> OAuthApplicationPublicInfo:
    """
    Get public information about an OAuth application.

    This endpoint is used by the consent screen to display application details
    to the user before they authorize access.

    Returns:
        - name: Application name
        - description: Application description (if provided)
        - scopes: List of scopes the application is allowed to request
    """
    app = await get_oauth_application(client_id)
    if not app or not app.is_active:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Application not found",
        )

    return OAuthApplicationPublicInfo(
        name=app.name,
        description=app.description,
        logo_url=app.logo_url,
        scopes=[s.value for s in app.scopes],
    )


# ============================================================================
# Authorization Endpoint
# ============================================================================


class AuthorizeRequest(BaseModel):
    """OAuth 2.0 authorization request"""

    client_id: str = Field(description="Client identifier")
    redirect_uri: str = Field(description="Redirect URI")
    scopes: list[str] = Field(description="List of scopes")
    state: str = Field(description="Anti-CSRF token from client")
    response_type: str = Field(
        default="code", description="Must be 'code' for authorization code flow"
    )
    code_challenge: str = Field(description="PKCE code challenge (required)")
    code_challenge_method: Literal["S256", "plain"] = Field(
        default="S256", description="PKCE code challenge method (S256 recommended)"
    )


class AuthorizeResponse(BaseModel):
    """OAuth 2.0 authorization response with redirect URL"""

    redirect_url: str = Field(description="URL to redirect the user to")


@router.post("/authorize")
async def authorize(
    request: AuthorizeRequest = Body(),
    user_id: str = Security(get_user_id),
) -> AuthorizeResponse:
    """
    OAuth 2.0 Authorization Endpoint

    User must be logged in (authenticated with Supabase JWT).
    This endpoint creates an authorization code and returns a redirect URL.

    PKCE (Proof Key for Code Exchange) is REQUIRED for all authorization requests.

    The frontend consent screen should call this endpoint after the user approves,
    then redirect the user to the returned `redirect_url`.

    Request Body:
    - client_id: The OAuth application's client ID
    - redirect_uri: Where to redirect after authorization (must match registered URI)
    - scopes: List of permissions (e.g., "EXECUTE_GRAPH READ_GRAPH")
    - state: Anti-CSRF token provided by client (will be returned in redirect)
    - response_type: Must be "code" (for authorization code flow)
    - code_challenge: PKCE code challenge (required)
    - code_challenge_method: "S256" (recommended) or "plain"

    Returns:
    - redirect_url: The URL to redirect the user to (includes authorization code)

    Error cases return a redirect_url with error parameters, or raise HTTPException
    for critical errors (like invalid redirect_uri).
    """
    try:
        # Validate response_type
        if request.response_type != "code":
            return _error_redirect_url(
                request.redirect_uri,
                request.state,
                "unsupported_response_type",
                "Only 'code' response type is supported",
            )

        # Get application
        app = await get_oauth_application(request.client_id)
        if not app:
            return _error_redirect_url(
                request.redirect_uri,
                request.state,
                "invalid_client",
                "Unknown client_id",
            )

        if not app.is_active:
            return _error_redirect_url(
                request.redirect_uri,
                request.state,
                "invalid_client",
                "Application is not active",
            )

        # Validate redirect URI
        if not validate_redirect_uri(app, request.redirect_uri):
            # For invalid redirect_uri, we can't redirect safely
            # Must return error instead
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=(
                    "Invalid redirect_uri. "
                    f"Must be one of: {', '.join(app.redirect_uris)}"
                ),
            )

        # Parse and validate scopes
        try:
            requested_scopes = [APIKeyPermission(s.strip()) for s in request.scopes]
        except ValueError as e:
            return _error_redirect_url(
                request.redirect_uri,
                request.state,
                "invalid_scope",
                f"Invalid scope: {e}",
            )

        if not requested_scopes:
            return _error_redirect_url(
                request.redirect_uri,
                request.state,
                "invalid_scope",
                "At least one scope is required",
            )

        if not validate_scopes(app, requested_scopes):
            return _error_redirect_url(
                request.redirect_uri,
                request.state,
                "invalid_scope",
                "Application is not authorized for all requested scopes. "
                f"Allowed: {', '.join(s.value for s in app.scopes)}",
            )

        # Create authorization code
        auth_code = await create_authorization_code(
            application_id=app.id,
            user_id=user_id,
            scopes=requested_scopes,
            redirect_uri=request.redirect_uri,
            code_challenge=request.code_challenge,
            code_challenge_method=request.code_challenge_method,
        )

        # Build redirect URL with authorization code
        params = {
            "code": auth_code.code,
            "state": request.state,
        }
        redirect_url = f"{request.redirect_uri}?{urlencode(params)}"

        logger.info(
            f"Authorization code issued for user #{user_id} "
            f"and app {app.name} (#{app.id})"
        )

        return AuthorizeResponse(redirect_url=redirect_url)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in authorization endpoint: {e}", exc_info=True)
        return _error_redirect_url(
            request.redirect_uri,
            request.state,
            "server_error",
            "An unexpected error occurred",
        )


def _error_redirect_url(
    redirect_uri: str,
    state: str,
    error: str,
    error_description: Optional[str] = None,
) -> AuthorizeResponse:
    """Helper to build redirect URL with OAuth error parameters"""
    params = {
        "error": error,
        "state": state,
    }
    if error_description:
        params["error_description"] = error_description

    redirect_url = f"{redirect_uri}?{urlencode(params)}"
    return AuthorizeResponse(redirect_url=redirect_url)
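For orientation, a hedged sketch of how the consent screen might call this endpoint after the user approves. The base URL, client ID, JWT, and state value are placeholders, and the `/api/oauth` mount point is assumed from the module docstring:

```python
import requests

BASE_URL = "https://platform.example.com"  # placeholder
CLIENT_ID = "my-client-id"                 # placeholder
SUPABASE_JWT = "..."                       # the logged-in user's session token
state = "opaque-anti-csrf-value"           # minted by the third-party client

resp = requests.post(
    f"{BASE_URL}/api/oauth/authorize",
    headers={"Authorization": f"Bearer {SUPABASE_JWT}"},
    json={
        "client_id": CLIENT_ID,
        "redirect_uri": "https://thirdparty.example/callback",
        "scopes": ["EXECUTE_GRAPH", "READ_GRAPH"],
        "state": state,
        "code_challenge": code_challenge,  # from the PKCE sketch above
        "code_challenge_method": "S256",
    },
)
resp.raise_for_status()
# The browser is then sent to redirect_url, which carries ?code=...&state=...
print(resp.json()["redirect_url"])
```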
# ============================================================================
# Token Endpoint
# ============================================================================


class TokenRequestByCode(BaseModel):
    grant_type: Literal["authorization_code"]
    code: str = Field(description="Authorization code")
    redirect_uri: str = Field(
        description="Redirect URI (must match authorization request)"
    )
    client_id: str
    client_secret: str
    code_verifier: str = Field(description="PKCE code verifier")


class TokenRequestByRefreshToken(BaseModel):
    grant_type: Literal["refresh_token"]
    refresh_token: str
    client_id: str
    client_secret: str


@router.post("/token")
async def token(
    request: TokenRequestByCode | TokenRequestByRefreshToken = Body(),
) -> TokenResponse:
    """
    OAuth 2.0 Token Endpoint

    Exchanges authorization code or refresh token for access token.

    Grant Types:
    1. authorization_code: Exchange authorization code for tokens
       - Required: grant_type, code, redirect_uri, client_id, client_secret
       - Optional: code_verifier (required if PKCE was used)

    2. refresh_token: Exchange refresh token for new access token
       - Required: grant_type, refresh_token, client_id, client_secret
    Returns:
    - token_type: "Bearer"
    - access_token: Bearer token for API access (1 hour TTL)
    - access_token_expires_at: When the access token expires
    - refresh_token: Token for refreshing access (30 days TTL)
    - refresh_token_expires_at: When the refresh token expires
    - scopes: List of scopes
"""
|
||||
# Validate client credentials
|
||||
try:
|
||||
app = await validate_client_credentials(
|
||||
request.client_id, request.client_secret
|
||||
)
|
||||
except InvalidClientError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=str(e),
|
||||
)
|
||||
|
||||
# Handle authorization_code grant
|
||||
if request.grant_type == "authorization_code":
|
||||
# Consume authorization code
|
||||
try:
|
||||
user_id, scopes = await consume_authorization_code(
|
||||
code=request.code,
|
||||
application_id=app.id,
|
||||
redirect_uri=request.redirect_uri,
|
||||
code_verifier=request.code_verifier,
|
||||
)
|
||||
except InvalidGrantError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e),
|
||||
)
|
||||
|
||||
# Create access and refresh tokens
|
||||
access_token = await create_access_token(app.id, user_id, scopes)
|
||||
refresh_token = await create_refresh_token(app.id, user_id, scopes)
|
||||
|
||||
        logger.info(
            f"Access token issued for user #{user_id} and app {app.name} (#{app.id}) "
            "via authorization code"
        )
        if not access_token.token or not refresh_token.token:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Failed to generate tokens",
            )

        return TokenResponse(
            token_type="Bearer",
            access_token=access_token.token.get_secret_value(),
            access_token_expires_at=access_token.expires_at,
            refresh_token=refresh_token.token.get_secret_value(),
            refresh_token_expires_at=refresh_token.expires_at,
            scopes=list(s.value for s in scopes),
        )

    # Handle refresh_token grant
    elif request.grant_type == "refresh_token":
        # Refresh access token
        try:
            new_access_token, new_refresh_token = await refresh_tokens(
                request.refresh_token, app.id
            )
        except InvalidGrantError as e:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=str(e),
            )

        logger.info(
            f"Tokens refreshed for user #{new_access_token.user_id} "
            f"by app {app.name} (#{app.id})"
        )

        if not new_access_token.token or not new_refresh_token.token:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Failed to generate tokens",
            )

        return TokenResponse(
            token_type="Bearer",
            access_token=new_access_token.token.get_secret_value(),
            access_token_expires_at=new_access_token.expires_at,
            refresh_token=new_refresh_token.token.get_secret_value(),
            refresh_token_expires_at=new_refresh_token.expires_at,
            scopes=list(s.value for s in new_access_token.scopes),
        )

    else:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Unsupported grant_type: {request.grant_type}. "
            "Must be 'authorization_code' or 'refresh_token'",
        )
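Continuing the client sketch from above, the third-party app would exchange the code and later rotate tokens. All identifiers carry over from the previous snippets and are placeholders:

```python
CLIENT_SECRET = "..."  # placeholder; issued at app registration
code = "..."           # taken from the ?code=... redirect parameter

# authorization_code grant (step 6): trade the code for tokens
tokens = requests.post(
    f"{BASE_URL}/api/oauth/token",
    json={
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": "https://thirdparty.example/callback",
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
        "code_verifier": code_verifier,  # the PKCE secret kept from step 2
    },
).json()

# refresh_token grant: rotate both tokens before the access token expires
tokens = requests.post(
    f"{BASE_URL}/api/oauth/token",
    json={
        "grant_type": "refresh_token",
        "refresh_token": tokens["refresh_token"],
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
    },
).json()
```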
# ============================================================================
# Token Introspection Endpoint
# ============================================================================


@router.post("/introspect")
async def introspect(
    token: str = Body(description="Token to introspect"),
    token_type_hint: Optional[Literal["access_token", "refresh_token"]] = Body(
        None, description="Hint about token type ('access_token' or 'refresh_token')"
    ),
    client_id: str = Body(description="Client identifier"),
    client_secret: str = Body(description="Client secret"),
) -> TokenIntrospectionResult:
    """
    OAuth 2.0 Token Introspection Endpoint (RFC 7662)

    Allows clients to check if a token is valid and get its metadata.

    Returns:
    - active: Whether the token is currently active
    - scopes: List of authorized scopes (if active)
    - client_id: The client the token was issued to (if active)
    - user_id: The user the token represents (if active)
    - exp: Expiration timestamp (if active)
    - token_type: "access_token" or "refresh_token" (if active)
    """
    # Validate client credentials
    try:
        await validate_client_credentials(client_id, client_secret)
    except InvalidClientError as e:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=str(e),
        )

    # Introspect the token
    return await introspect_token(token, token_type_hint)
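A client holding a presented token could verify it with a call like the following hedged sketch (same placeholders as the snippets above; the `active` field is documented in the endpoint docstring):

```python
# Introspection per RFC 7662: ask the provider whether the token is live.
check = requests.post(
    f"{BASE_URL}/api/oauth/introspect",
    json={
        "token": tokens["access_token"],
        "token_type_hint": "access_token",
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
    },
).json()
if not check["active"]:
    raise PermissionError("token is expired, revoked, or unknown")
```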
# ============================================================================
# Token Revocation Endpoint
# ============================================================================


@router.post("/revoke")
async def revoke(
    token: str = Body(description="Token to revoke"),
    token_type_hint: Optional[Literal["access_token", "refresh_token"]] = Body(
        None, description="Hint about token type ('access_token' or 'refresh_token')"
    ),
    client_id: str = Body(description="Client identifier"),
    client_secret: str = Body(description="Client secret"),
):
    """
    OAuth 2.0 Token Revocation Endpoint (RFC 7009)

    Allows clients to revoke an access or refresh token.

    Note: Revoking a refresh token does NOT revoke associated access tokens.
    Revoking an access token does NOT revoke the associated refresh token.
    """
    # Validate client credentials
    try:
        app = await validate_client_credentials(client_id, client_secret)
    except InvalidClientError as e:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=str(e),
        )

    # Try to revoke as access token first
    # Note: We pass app.id to ensure the token belongs to the authenticated app
    if token_type_hint != "refresh_token":
        revoked = await revoke_access_token(token, app.id)
        if revoked:
            logger.info(
                f"Access token revoked for app {app.name} (#{app.id}); "
                f"user #{revoked.user_id}"
            )
            return {"status": "ok"}

    # Try to revoke as refresh token
    revoked = await revoke_refresh_token(token, app.id)
    if revoked:
        logger.info(
            f"Refresh token revoked for app {app.name} (#{app.id}); "
            f"user #{revoked.user_id}"
        )
        return {"status": "ok"}

    # Per RFC 7009, revocation endpoint returns 200 even if token not found
    # or if token belongs to a different application.
    # This prevents token scanning attacks.
    logger.warning(f"Unsuccessful token revocation attempt by app {app.name} #{app.id}")
    return {"status": "ok"}


# ============================================================================
# Application Management Endpoints (for app owners)
# ============================================================================


@router.get("/apps/mine")
async def list_my_oauth_apps(
    user_id: str = Security(get_user_id),
) -> list[OAuthApplicationInfo]:
    """
    List all OAuth applications owned by the current user.

    Returns a list of OAuth applications with their details including:
    - id, name, description, logo_url
    - client_id (public identifier)
    - redirect_uris, grant_types, scopes
    - is_active status
    - created_at, updated_at timestamps

    Note: client_secret is never returned for security reasons.
    """
    return await list_user_oauth_applications(user_id)


@router.patch("/apps/{app_id}/status")
async def update_app_status(
    app_id: str,
    user_id: str = Security(get_user_id),
    is_active: bool = Body(description="Whether the app should be active", embed=True),
) -> OAuthApplicationInfo:
    """
    Enable or disable an OAuth application.

    Only the application owner can update the status.
    When disabled, the application cannot be used for new authorizations
    and existing access tokens will fail validation.

    Returns the updated application info.
    """
    updated_app = await update_oauth_application(
        app_id=app_id,
        owner_id=user_id,
        is_active=is_active,
    )

    if not updated_app:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Application not found or you don't have permission to update it",
        )

    action = "enabled" if is_active else "disabled"
    logger.info(f"OAuth app {updated_app.name} (#{app_id}) {action} by user #{user_id}")

    return updated_app


class UpdateAppLogoRequest(BaseModel):
    logo_url: str = Field(description="URL of the uploaded logo image")


@router.patch("/apps/{app_id}/logo")
async def update_app_logo(
    app_id: str,
    request: UpdateAppLogoRequest = Body(),
    user_id: str = Security(get_user_id),
) -> OAuthApplicationInfo:
    """
    Update the logo URL for an OAuth application.

    Only the application owner can update the logo.
    The logo should be uploaded first using the media upload endpoint,
    then this endpoint is called with the resulting URL.

    Logo requirements:
    - Must be square (1:1 aspect ratio)
    - Minimum 512x512 pixels
    - Maximum 2048x2048 pixels

    Returns the updated application info.
    """
    if (
        not (app := await get_oauth_application_by_id(app_id))
        or app.owner_id != user_id
    ):
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="OAuth App not found",
        )

    # Delete the current app logo file (if any and it's in our cloud storage)
    await _delete_app_current_logo_file(app)

    updated_app = await update_oauth_application(
        app_id=app_id,
        owner_id=user_id,
        logo_url=request.logo_url,
    )

    if not updated_app:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Application not found or you don't have permission to update it",
        )

    logger.info(
        f"OAuth app {updated_app.name} (#{app_id}) logo updated by user #{user_id}"
    )

    return updated_app


# Logo upload constraints
LOGO_MIN_SIZE = 512
LOGO_MAX_SIZE = 2048
LOGO_ALLOWED_TYPES = {"image/jpeg", "image/png", "image/webp"}
LOGO_MAX_FILE_SIZE = 3 * 1024 * 1024  # 3MB


@router.post("/apps/{app_id}/logo/upload")
async def upload_app_logo(
    app_id: str,
    file: UploadFile,
    user_id: str = Security(get_user_id),
) -> OAuthApplicationInfo:
    """
    Upload a logo image for an OAuth application.

    Requirements:
    - Image must be square (1:1 aspect ratio)
    - Minimum 512x512 pixels
    - Maximum 2048x2048 pixels
    - Allowed formats: JPEG, PNG, WebP
    - Maximum file size: 3MB

    The image is uploaded to cloud storage and the app's logoUrl is updated.
    Returns the updated application info.
    """
    # Verify ownership to reduce vulnerability to DoS(torage) or DoM(oney) attacks
    if (
        not (app := await get_oauth_application_by_id(app_id))
        or app.owner_id != user_id
    ):
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="OAuth App not found",
        )

    # Check GCS configuration
    if not settings.config.media_gcs_bucket_name:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Media storage is not configured",
        )

    # Validate content type
    content_type = file.content_type
    if content_type not in LOGO_ALLOWED_TYPES:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Invalid file type. Allowed: JPEG, PNG, WebP. Got: {content_type}",
        )

    # Read file content
    try:
        file_bytes = await file.read()
    except Exception as e:
        logger.error(f"Error reading logo file: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Failed to read uploaded file",
        )

    # Check file size
    if len(file_bytes) > LOGO_MAX_FILE_SIZE:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=(
                "File too large. "
                f"Maximum size is {LOGO_MAX_FILE_SIZE // 1024 // 1024}MB"
            ),
        )

    # Validate image dimensions
    try:
        image = Image.open(io.BytesIO(file_bytes))
        width, height = image.size

        if width != height:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Logo must be square. Got {width}x{height}",
            )

        if width < LOGO_MIN_SIZE:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Logo too small. Minimum {LOGO_MIN_SIZE}x{LOGO_MIN_SIZE}. "
                f"Got {width}x{height}",
            )

        if width > LOGO_MAX_SIZE:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Logo too large. Maximum {LOGO_MAX_SIZE}x{LOGO_MAX_SIZE}. "
                f"Got {width}x{height}",
            )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error validating logo image: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid image file",
        )

    # Scan for viruses
    filename = file.filename or "logo"
    await scan_content_safe(file_bytes, filename=filename)

    # Generate unique filename
    file_ext = os.path.splitext(filename)[1].lower() or ".png"
    unique_filename = f"{uuid.uuid4()}{file_ext}"
    storage_path = f"oauth-apps/{app_id}/logo/{unique_filename}"

    # Upload to GCS
    try:
        async with async_storage.Storage() as async_client:
            bucket_name = settings.config.media_gcs_bucket_name

            await async_client.upload(
                bucket_name, storage_path, file_bytes, content_type=content_type
            )

            logo_url = f"https://storage.googleapis.com/{bucket_name}/{storage_path}"
    except Exception as e:
        logger.error(f"Error uploading logo to GCS: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to upload logo",
        )

    # Delete the current app logo file (if any and it's in our cloud storage)
    await _delete_app_current_logo_file(app)

    # Update the app with the new logo URL
    updated_app = await update_oauth_application(
        app_id=app_id,
        owner_id=user_id,
        logo_url=logo_url,
    )

    if not updated_app:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Application not found or you don't have permission to update it",
        )

    logger.info(
        f"OAuth app {updated_app.name} (#{app_id}) logo uploaded by user #{user_id}"
    )

    return updated_app


async def _delete_app_current_logo_file(app: OAuthApplicationInfo):
    """
    Delete the current logo file for the given app, if there is one in our cloud storage
    """
    bucket_name = settings.config.media_gcs_bucket_name
    storage_base_url = f"https://storage.googleapis.com/{bucket_name}/"

    if app.logo_url and app.logo_url.startswith(storage_base_url):
        # Parse blob path from URL: https://storage.googleapis.com/{bucket}/{path}
        old_path = app.logo_url.replace(storage_base_url, "")
        try:
            async with async_storage.Storage() as async_client:
                await async_client.delete(bucket_name, old_path)
            logger.info(f"Deleted old logo for OAuth app #{app.id}: {old_path}")
        except Exception as e:
            # Log but don't fail - the new logo was uploaded successfully
            logger.warning(
                f"Failed to delete old logo for OAuth app #{app.id}: {e}", exc_info=e
            )
1784
autogpt_platform/backend/backend/api/features/oauth_test.py
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.