Compare commits

..

8 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Swifty | 39839a51b3 | Merge branch 'dev' into swiftyos/sqlalchemy-plumbing | 2025-11-21 20:52:31 +01:00 |
| Swifty | b114354b1f | update asyncpg to version ^0.30.0 | 2025-11-20 11:41:23 +01:00 |
| Swifty | 0c0488ec0b | Added docs and more specific error handling | 2025-11-20 11:33:24 +01:00 |
| Swifty | 5388a321c7 | update poetry lock | 2025-11-20 11:21:56 +01:00 |
| Swifty | 8ae5cbecd6 | added tests and toggle for sqlalchemy integration | 2025-11-20 11:10:32 +01:00 |
| Swifty | 900b0e736f | add tests | 2025-11-20 11:10:32 +01:00 |
| Swifty | 0085e5e6e0 | integrate into database manager and rest api. | 2025-11-20 11:10:32 +01:00 |
| Swifty | 5c8dac46f5 | added sqlalchemy plumbing | 2025-11-20 11:10:32 +01:00 |
1121 changed files with 27159 additions and 130018 deletions

View File

@@ -1,37 +0,0 @@
{
  "worktreeCopyPatterns": [
    ".env*",
    ".vscode/**",
    ".auth/**",
    ".claude/**",
    "autogpt_platform/.env*",
    "autogpt_platform/backend/.env*",
    "autogpt_platform/frontend/.env*",
    "autogpt_platform/frontend/.auth/**",
    "autogpt_platform/db/docker/.env*"
  ],
  "worktreeCopyIgnores": [
    "**/node_modules/**",
    "**/dist/**",
    "**/.git/**",
    "**/Thumbs.db",
    "**/.DS_Store",
    "**/.next/**",
    "**/__pycache__/**",
    "**/.ruff_cache/**",
    "**/.pytest_cache/**",
    "**/*.pyc",
    "**/playwright-report/**",
    "**/logs/**",
    "**/site/**"
  ],
  "worktreePathTemplate": "$BASE_PATH.worktree",
  "postCreateCmd": [
    "cd autogpt_platform/autogpt_libs && poetry install",
    "cd autogpt_platform/backend && poetry install && poetry run prisma generate",
    "cd autogpt_platform/frontend && pnpm install",
    "cd docs && pip install -r requirements.txt"
  ],
  "terminalCommand": "code .",
  "deleteBranchWithWorktree": false
}

File diff suppressed because it is too large.

View File

@@ -1,125 +0,0 @@
---
name: vercel-react-best-practices
description: React and Next.js performance optimization guidelines from Vercel Engineering. This skill should be used when writing, reviewing, or refactoring React/Next.js code to ensure optimal performance patterns. Triggers on tasks involving React components, Next.js pages, data fetching, bundle optimization, or performance improvements.
license: MIT
metadata:
author: vercel
version: "1.0.0"
---
# Vercel React Best Practices
Comprehensive performance optimization guide for React and Next.js applications, maintained by Vercel. Contains 45 rules across 8 categories, prioritized by impact to guide automated refactoring and code generation.
## When to Apply
Reference these guidelines when:
- Writing new React components or Next.js pages
- Implementing data fetching (client or server-side)
- Reviewing code for performance issues
- Refactoring existing React/Next.js code
- Optimizing bundle size or load times
## Rule Categories by Priority
| Priority | Category | Impact | Prefix |
|----------|----------|--------|--------|
| 1 | Eliminating Waterfalls | CRITICAL | `async-` |
| 2 | Bundle Size Optimization | CRITICAL | `bundle-` |
| 3 | Server-Side Performance | HIGH | `server-` |
| 4 | Client-Side Data Fetching | MEDIUM-HIGH | `client-` |
| 5 | Re-render Optimization | MEDIUM | `rerender-` |
| 6 | Rendering Performance | MEDIUM | `rendering-` |
| 7 | JavaScript Performance | LOW-MEDIUM | `js-` |
| 8 | Advanced Patterns | LOW | `advanced-` |
## Quick Reference
### 1. Eliminating Waterfalls (CRITICAL)
- `async-defer-await` - Move await into branches where actually used
- `async-parallel` - Use Promise.all() for independent operations
- `async-dependencies` - Use better-all for partial dependencies
- `async-api-routes` - Start promises early, await late in API routes
- `async-suspense-boundaries` - Use Suspense to stream content
### 2. Bundle Size Optimization (CRITICAL)
- `bundle-barrel-imports` - Import directly, avoid barrel files
- `bundle-dynamic-imports` - Use next/dynamic for heavy components
- `bundle-defer-third-party` - Load analytics/logging after hydration
- `bundle-conditional` - Load modules only when feature is activated
- `bundle-preload` - Preload on hover/focus for perceived speed
### 3. Server-Side Performance (HIGH)
- `server-cache-react` - Use React.cache() for per-request deduplication
- `server-cache-lru` - Use LRU cache for cross-request caching
- `server-serialization` - Minimize data passed to client components
- `server-parallel-fetching` - Restructure components to parallelize fetches
- `server-after-nonblocking` - Use after() for non-blocking operations
### 4. Client-Side Data Fetching (MEDIUM-HIGH)
- `client-swr-dedup` - Use SWR for automatic request deduplication
- `client-event-listeners` - Deduplicate global event listeners
### 5. Re-render Optimization (MEDIUM)
- `rerender-defer-reads` - Don't subscribe to state only used in callbacks
- `rerender-memo` - Extract expensive work into memoized components
- `rerender-dependencies` - Use primitive dependencies in effects
- `rerender-derived-state` - Subscribe to derived booleans, not raw values
- `rerender-functional-setstate` - Use functional setState for stable callbacks
- `rerender-lazy-state-init` - Pass function to useState for expensive values
- `rerender-transitions` - Use startTransition for non-urgent updates
### 6. Rendering Performance (MEDIUM)
- `rendering-animate-svg-wrapper` - Animate div wrapper, not SVG element
- `rendering-content-visibility` - Use content-visibility for long lists
- `rendering-hoist-jsx` - Extract static JSX outside components
- `rendering-svg-precision` - Reduce SVG coordinate precision
- `rendering-hydration-no-flicker` - Use inline script for client-only data
- `rendering-activity` - Use Activity component for show/hide
- `rendering-conditional-render` - Use ternary, not && for conditionals
### 7. JavaScript Performance (LOW-MEDIUM)
- `js-batch-dom-css` - Group CSS changes via classes or cssText
- `js-index-maps` - Build Map for repeated lookups
- `js-cache-property-access` - Cache object properties in loops
- `js-cache-function-results` - Cache function results in module-level Map
- `js-cache-storage` - Cache localStorage/sessionStorage reads
- `js-combine-iterations` - Combine multiple filter/map into one loop
- `js-length-check-first` - Check array length before expensive comparison
- `js-early-exit` - Return early from functions
- `js-hoist-regexp` - Hoist RegExp creation outside loops
- `js-min-max-loop` - Use loop for min/max instead of sort
- `js-set-map-lookups` - Use Set/Map for O(1) lookups
- `js-tosorted-immutable` - Use toSorted() for immutability
### 8. Advanced Patterns (LOW)
- `advanced-event-handler-refs` - Store event handlers in refs
- `advanced-use-latest` - useLatest for stable callback refs
## How to Use
Read individual rule files for detailed explanations and code examples:
```
rules/async-parallel.md
rules/bundle-barrel-imports.md
rules/_sections.md
```
Each rule file contains:
- Brief explanation of why it matters
- Incorrect code example with explanation
- Correct code example with explanation
- Additional context and references
## Full Compiled Document
For the complete guide with all rules expanded: `AGENTS.md`

View File

@@ -1,55 +0,0 @@
---
title: Store Event Handlers in Refs
impact: LOW
impactDescription: stable subscriptions
tags: advanced, hooks, refs, event-handlers, optimization
---
## Store Event Handlers in Refs
Store callbacks in refs when used in effects that shouldn't re-subscribe on callback changes.
**Incorrect (re-subscribes on every render):**
```tsx
function useWindowEvent(event: string, handler: () => void) {
  useEffect(() => {
    window.addEventListener(event, handler)
    return () => window.removeEventListener(event, handler)
  }, [event, handler])
}
```
**Correct (stable subscription):**
```tsx
function useWindowEvent(event: string, handler: () => void) {
  const handlerRef = useRef(handler)
  useEffect(() => {
    handlerRef.current = handler
  }, [handler])
  useEffect(() => {
    const listener = () => handlerRef.current()
    window.addEventListener(event, listener)
    return () => window.removeEventListener(event, listener)
  }, [event])
}
```
**Alternative: use `useEffectEvent` if you're on latest React:**
```tsx
import { useEffectEvent } from 'react'

function useWindowEvent(event: string, handler: () => void) {
  const onEvent = useEffectEvent(handler)
  useEffect(() => {
    window.addEventListener(event, onEvent)
    return () => window.removeEventListener(event, onEvent)
  }, [event])
}
```
`useEffectEvent` provides a cleaner API for the same pattern: it creates a stable function reference that always calls the latest version of the handler.
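A hypothetical consumer (component and names assumed, not from the original) showing the payoff: the handler closure changes every render, yet the underlying listener is attached only once:
```tsx
function FocusLogger({ userId }: { userId: string }) {
  // A new handler closure is created each render; the 'focus'
  // listener itself is registered exactly once.
  useWindowEvent('focus', () => console.log('window focused for', userId))
  return null
}
```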

View File

@@ -1,49 +0,0 @@
---
title: useLatest for Stable Callback Refs
impact: LOW
impactDescription: prevents effect re-runs
tags: advanced, hooks, useLatest, refs, optimization
---
## useLatest for Stable Callback Refs
Access latest values in callbacks without adding them to dependency arrays. Prevents effect re-runs while avoiding stale closures.
**Implementation:**
```typescript
function useLatest<T>(value: T) {
  const ref = useRef(value)
  useEffect(() => {
    ref.current = value
  }, [value])
  return ref
}
```
**Incorrect (effect re-runs on every callback change):**
```tsx
function SearchInput({ onSearch }: { onSearch: (q: string) => void }) {
  const [query, setQuery] = useState('')
  useEffect(() => {
    const timeout = setTimeout(() => onSearch(query), 300)
    return () => clearTimeout(timeout)
  }, [query, onSearch])
}
```
**Correct (stable effect, fresh callback):**
```tsx
function SearchInput({ onSearch }: { onSearch: (q: string) => void }) {
  const [query, setQuery] = useState('')
  const onSearchRef = useLatest(onSearch)
  useEffect(() => {
    const timeout = setTimeout(() => onSearchRef.current(query), 300)
    return () => clearTimeout(timeout)
  }, [query])
}
```

View File

@@ -1,38 +0,0 @@
---
title: Prevent Waterfall Chains in API Routes
impact: CRITICAL
impactDescription: 2-10× improvement
tags: api-routes, server-actions, waterfalls, parallelization
---
## Prevent Waterfall Chains in API Routes
In API routes and Server Actions, start independent operations immediately, even if you don't await them yet.
**Incorrect (config waits for auth, data waits for both):**
```typescript
export async function GET(request: Request) {
  const session = await auth()
  const config = await fetchConfig()
  const data = await fetchData(session.user.id)
  return Response.json({ data, config })
}
```
**Correct (auth and config start immediately):**
```typescript
export async function GET(request: Request) {
  const sessionPromise = auth()
  const configPromise = fetchConfig()
  const session = await sessionPromise
  const [config, data] = await Promise.all([
    configPromise,
    fetchData(session.user.id)
  ])
  return Response.json({ data, config })
}
```
For operations with more complex dependency chains, use `better-all` to automatically maximize parallelism (see Dependency-Based Parallelization).

View File

@@ -1,80 +0,0 @@
---
title: Defer Await Until Needed
impact: HIGH
impactDescription: avoids blocking unused code paths
tags: async, await, conditional, optimization
---
## Defer Await Until Needed
Move `await` operations into the branches where they're actually used to avoid blocking code paths that don't need them.
**Incorrect (blocks both branches):**
```typescript
async function handleRequest(userId: string, skipProcessing: boolean) {
  const userData = await fetchUserData(userId)
  if (skipProcessing) {
    // Returns immediately but still waited for userData
    return { skipped: true }
  }
  // Only this branch uses userData
  return processUserData(userData)
}
```
**Correct (only blocks when needed):**
```typescript
async function handleRequest(userId: string, skipProcessing: boolean) {
  if (skipProcessing) {
    // Returns immediately without waiting
    return { skipped: true }
  }
  // Fetch only when needed
  const userData = await fetchUserData(userId)
  return processUserData(userData)
}
```
**Another example (early return optimization):**
```typescript
// Incorrect: always fetches permissions
async function updateResource(resourceId: string, userId: string) {
  const permissions = await fetchPermissions(userId)
  const resource = await getResource(resourceId)
  if (!resource) {
    return { error: 'Not found' }
  }
  if (!permissions.canEdit) {
    return { error: 'Forbidden' }
  }
  return await updateResourceData(resource, permissions)
}

// Correct: fetches only when needed
async function updateResource(resourceId: string, userId: string) {
  const resource = await getResource(resourceId)
  if (!resource) {
    return { error: 'Not found' }
  }
  const permissions = await fetchPermissions(userId)
  if (!permissions.canEdit) {
    return { error: 'Forbidden' }
  }
  return await updateResourceData(resource, permissions)
}
```
This optimization is especially valuable when the skipped branch is frequently taken, or when the deferred operation is expensive.

View File

@@ -1,36 +0,0 @@
---
title: Dependency-Based Parallelization
impact: CRITICAL
impactDescription: 2-10× improvement
tags: async, parallelization, dependencies, better-all
---
## Dependency-Based Parallelization
For operations with partial dependencies, use `better-all` to maximize parallelism. It automatically starts each task at the earliest possible moment.
**Incorrect (profile waits for config unnecessarily):**
```typescript
const [user, config] = await Promise.all([
  fetchUser(),
  fetchConfig()
])
const profile = await fetchProfile(user.id)
```
**Correct (config and profile run in parallel):**
```typescript
import { all } from 'better-all'

const { user, config, profile } = await all({
  async user() { return fetchUser() },
  async config() { return fetchConfig() },
  async profile() {
    return fetchProfile((await this.$.user).id)
  }
})
```
Reference: [https://github.com/shuding/better-all](https://github.com/shuding/better-all)

View File

@@ -1,28 +0,0 @@
---
title: Promise.all() for Independent Operations
impact: CRITICAL
impactDescription: 2-10× improvement
tags: async, parallelization, promises, waterfalls
---
## Promise.all() for Independent Operations
When async operations have no interdependencies, execute them concurrently using `Promise.all()`.
**Incorrect (sequential execution, 3 round trips):**
```typescript
const user = await fetchUser()
const posts = await fetchPosts()
const comments = await fetchComments()
```
**Correct (parallel execution, 1 round trip):**
```typescript
const [user, posts, comments] = await Promise.all([
  fetchUser(),
  fetchPosts(),
  fetchComments()
])
```

View File

@@ -1,99 +0,0 @@
---
title: Strategic Suspense Boundaries
impact: HIGH
impactDescription: faster initial paint
tags: async, suspense, streaming, layout-shift
---
## Strategic Suspense Boundaries
Instead of awaiting data in async components before returning JSX, use Suspense boundaries to show the wrapper UI faster while data loads.
**Incorrect (wrapper blocked by data fetching):**
```tsx
async function Page() {
  const data = await fetchData() // Blocks entire page
  return (
    <div>
      <div>Sidebar</div>
      <div>Header</div>
      <div>
        <DataDisplay data={data} />
      </div>
      <div>Footer</div>
    </div>
  )
}
```
The entire layout waits for data even though only the middle section needs it.
**Correct (wrapper shows immediately, data streams in):**
```tsx
function Page() {
  return (
    <div>
      <div>Sidebar</div>
      <div>Header</div>
      <div>
        <Suspense fallback={<Skeleton />}>
          <DataDisplay />
        </Suspense>
      </div>
      <div>Footer</div>
    </div>
  )
}

async function DataDisplay() {
  const data = await fetchData() // Only blocks this component
  return <div>{data.content}</div>
}
```
Sidebar, Header, and Footer render immediately. Only DataDisplay waits for data.
**Alternative (share promise across components):**
```tsx
function Page() {
  // Start fetch immediately, but don't await
  const dataPromise = fetchData()
  return (
    <div>
      <div>Sidebar</div>
      <div>Header</div>
      <Suspense fallback={<Skeleton />}>
        <DataDisplay dataPromise={dataPromise} />
        <DataSummary dataPromise={dataPromise} />
      </Suspense>
      <div>Footer</div>
    </div>
  )
}

function DataDisplay({ dataPromise }: { dataPromise: Promise<Data> }) {
  const data = use(dataPromise) // Unwraps the promise
  return <div>{data.content}</div>
}

function DataSummary({ dataPromise }: { dataPromise: Promise<Data> }) {
  const data = use(dataPromise) // Reuses the same promise
  return <div>{data.summary}</div>
}
```
Both components share the same promise, so only one fetch occurs. Layout renders immediately while both components wait together.
**When NOT to use this pattern:**
- Critical data needed for layout decisions (affects positioning)
- SEO-critical content above the fold
- Small, fast queries where suspense overhead isn't worth it
- When you want to avoid layout shift (loading → content jump)
**Trade-off:** Faster initial paint vs potential layout shift. Choose based on your UX priorities.

View File

@@ -1,59 +0,0 @@
---
title: Avoid Barrel File Imports
impact: CRITICAL
impactDescription: 200-800ms import cost, slow builds
tags: bundle, imports, tree-shaking, barrel-files, performance
---
## Avoid Barrel File Imports
Import directly from source files instead of barrel files to avoid loading thousands of unused modules. **Barrel files** are entry points that re-export multiple modules (e.g., `index.js` that does `export * from './module'`).
Popular icon and component libraries can have **up to 10,000 re-exports** in their entry file. For many React packages, **it takes 200-800ms just to import them**, affecting both development speed and production cold starts.
**Why tree-shaking doesn't help:** When a library is marked as external (not bundled), the bundler can't optimize it. If you bundle it to enable tree-shaking, builds become substantially slower analyzing the entire module graph.
**Incorrect (imports entire library):**
```tsx
import { Check, X, Menu } from 'lucide-react'
// Loads 1,583 modules, takes ~2.8s extra in dev
// Runtime cost: 200-800ms on every cold start
import { Button, TextField } from '@mui/material'
// Loads 2,225 modules, takes ~4.2s extra in dev
```
**Correct (imports only what you need):**
```tsx
import Check from 'lucide-react/dist/esm/icons/check'
import X from 'lucide-react/dist/esm/icons/x'
import Menu from 'lucide-react/dist/esm/icons/menu'
// Loads only 3 modules (~2KB vs ~1MB)
import Button from '@mui/material/Button'
import TextField from '@mui/material/TextField'
// Loads only what you use
```
**Alternative (Next.js 13.5+):**
```js
// next.config.js - use optimizePackageImports
module.exports = {
  experimental: {
    optimizePackageImports: ['lucide-react', '@mui/material']
  }
}

// Then you can keep the ergonomic barrel imports:
import { Check, X, Menu } from 'lucide-react'
// Automatically transformed to direct imports at build time
```
Direct imports provide 15-70% faster dev boot, 28% faster builds, 40% faster cold starts, and significantly faster HMR.
Libraries commonly affected: `lucide-react`, `@mui/material`, `@mui/icons-material`, `@tabler/icons-react`, `react-icons`, `@headlessui/react`, `@radix-ui/react-*`, `lodash`, `ramda`, `date-fns`, `rxjs`, `react-use`.
Reference: [How we optimized package imports in Next.js](https://vercel.com/blog/how-we-optimized-package-imports-in-next-js)

View File

@@ -1,31 +0,0 @@
---
title: Conditional Module Loading
impact: HIGH
impactDescription: loads large data only when needed
tags: bundle, conditional-loading, lazy-loading
---
## Conditional Module Loading
Load large data or modules only when a feature is activated.
**Example (lazy-load animation frames):**
```tsx
function AnimationPlayer({ enabled }: { enabled: boolean }) {
  const [frames, setFrames] = useState<Frame[] | null>(null)
  const [failed, setFailed] = useState(false)
  useEffect(() => {
    if (enabled && !frames && typeof window !== 'undefined') {
      import('./animation-frames.js')
        .then(mod => setFrames(mod.frames))
        .catch(() => setFailed(true))
    }
  }, [enabled, frames])
  if (!enabled || failed) return null
  if (!frames) return <Skeleton />
  return <Canvas frames={frames} />
}
```
The `typeof window !== 'undefined'` check prevents bundling this module for SSR, optimizing server bundle size and build speed.

View File

@@ -1,49 +0,0 @@
---
title: Defer Non-Critical Third-Party Libraries
impact: MEDIUM
impactDescription: loads after hydration
tags: bundle, third-party, analytics, defer
---
## Defer Non-Critical Third-Party Libraries
Analytics, logging, and error tracking don't block user interaction. Load them after hydration.
**Incorrect (blocks initial bundle):**
```tsx
import { Analytics } from '@vercel/analytics/react'

export default function RootLayout({ children }) {
  return (
    <html>
      <body>
        {children}
        <Analytics />
      </body>
    </html>
  )
}
```
**Correct (loads after hydration):**
```tsx
import dynamic from 'next/dynamic'

const Analytics = dynamic(
  () => import('@vercel/analytics/react').then(m => m.Analytics),
  { ssr: false }
)

export default function RootLayout({ children }) {
  return (
    <html>
      <body>
        {children}
        <Analytics />
      </body>
    </html>
  )
}
```

View File

@@ -1,35 +0,0 @@
---
title: Dynamic Imports for Heavy Components
impact: CRITICAL
impactDescription: directly affects TTI and LCP
tags: bundle, dynamic-import, code-splitting, next-dynamic
---
## Dynamic Imports for Heavy Components
Use `next/dynamic` to lazy-load large components not needed on initial render.
**Incorrect (Monaco bundles with main chunk ~300KB):**
```tsx
import { MonacoEditor } from './monaco-editor'

function CodePanel({ code }: { code: string }) {
  return <MonacoEditor value={code} />
}
```
**Correct (Monaco loads on demand):**
```tsx
import dynamic from 'next/dynamic'

const MonacoEditor = dynamic(
  () => import('./monaco-editor').then(m => m.MonacoEditor),
  { ssr: false }
)

function CodePanel({ code }: { code: string }) {
  return <MonacoEditor value={code} />
}
```

View File

@@ -1,50 +0,0 @@
---
title: Preload Based on User Intent
impact: MEDIUM
impactDescription: reduces perceived latency
tags: bundle, preload, user-intent, hover
---
## Preload Based on User Intent
Preload heavy bundles before they're needed to reduce perceived latency.
**Example (preload on hover/focus):**
```tsx
function EditorButton({ onClick }: { onClick: () => void }) {
  const preload = () => {
    if (typeof window !== 'undefined') {
      void import('./monaco-editor')
    }
  }
  return (
    <button
      onMouseEnter={preload}
      onFocus={preload}
      onClick={onClick}
    >
      Open Editor
    </button>
  )
}
```
**Example (preload when feature flag is enabled):**
```tsx
function FlagsProvider({ children, flags }: Props) {
  useEffect(() => {
    if (flags.editorEnabled && typeof window !== 'undefined') {
      void import('./monaco-editor').then(mod => mod.init())
    }
  }, [flags.editorEnabled])
  return (
    <FlagsContext.Provider value={flags}>
      {children}
    </FlagsContext.Provider>
  )
}
```
The `typeof window !== 'undefined'` check prevents bundling preloaded modules for SSR, optimizing server bundle size and build speed.

View File

@@ -1,74 +0,0 @@
---
title: Deduplicate Global Event Listeners
impact: LOW
impactDescription: single listener for N components
tags: client, swr, event-listeners, subscription
---
## Deduplicate Global Event Listeners
Use `useSWRSubscription()` to share global event listeners across component instances.
**Incorrect (N instances = N listeners):**
```tsx
function useKeyboardShortcut(key: string, callback: () => void) {
  useEffect(() => {
    const handler = (e: KeyboardEvent) => {
      if (e.metaKey && e.key === key) {
        callback()
      }
    }
    window.addEventListener('keydown', handler)
    return () => window.removeEventListener('keydown', handler)
  }, [key, callback])
}
```
When using the `useKeyboardShortcut` hook multiple times, each instance will register a new listener.
**Correct (N instances = 1 listener):**
```tsx
import useSWRSubscription from 'swr/subscription'

// Module-level Map to track callbacks per key
const keyCallbacks = new Map<string, Set<() => void>>()

function useKeyboardShortcut(key: string, callback: () => void) {
  // Register this callback in the Map
  useEffect(() => {
    if (!keyCallbacks.has(key)) {
      keyCallbacks.set(key, new Set())
    }
    keyCallbacks.get(key)!.add(callback)
    return () => {
      const set = keyCallbacks.get(key)
      if (set) {
        set.delete(callback)
        if (set.size === 0) {
          keyCallbacks.delete(key)
        }
      }
    }
  }, [key, callback])
  useSWRSubscription('global-keydown', () => {
    const handler = (e: KeyboardEvent) => {
      if (e.metaKey && keyCallbacks.has(e.key)) {
        keyCallbacks.get(e.key)!.forEach(cb => cb())
      }
    }
    window.addEventListener('keydown', handler)
    return () => window.removeEventListener('keydown', handler)
  })
}

function Profile() {
  // Multiple shortcuts will share the same listener
  useKeyboardShortcut('p', () => { /* ... */ })
  useKeyboardShortcut('k', () => { /* ... */ })
  // ...
}
```

View File

@@ -1,56 +0,0 @@
---
title: Use SWR for Automatic Deduplication
impact: MEDIUM-HIGH
impactDescription: automatic deduplication
tags: client, swr, deduplication, data-fetching
---
## Use SWR for Automatic Deduplication
SWR enables request deduplication, caching, and revalidation across component instances.
**Incorrect (no deduplication, each instance fetches):**
```tsx
function UserList() {
  const [users, setUsers] = useState([])
  useEffect(() => {
    fetch('/api/users')
      .then(r => r.json())
      .then(setUsers)
  }, [])
}
```
**Correct (multiple instances share one request):**
```tsx
import useSWR from 'swr'

function UserList() {
  const { data: users } = useSWR('/api/users', fetcher)
}
```
**For immutable data:**
```tsx
import useSWRImmutable from 'swr/immutable'

function StaticContent() {
  const { data } = useSWRImmutable('/api/config', fetcher)
}
```
**For mutations:**
```tsx
import useSWRMutation from 'swr/mutation'

function UpdateButton() {
  const { trigger } = useSWRMutation('/api/user', updateUser)
  return <button onClick={() => trigger()}>Update</button>
}
```
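These snippets assume a `fetcher` helper, which the excerpts don't define; a typical minimal version:
```tsx
// Assumed helper: a minimal JSON fetcher for SWR
const fetcher = (url: string) => fetch(url).then(res => res.json())
```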
Reference: [https://swr.vercel.app](https://swr.vercel.app)

View File

@@ -1,82 +0,0 @@
---
title: Batch DOM CSS Changes
impact: MEDIUM
impactDescription: reduces reflows/repaints
tags: javascript, dom, css, performance, reflow
---
## Batch DOM CSS Changes
Avoid changing styles one property at a time. Group multiple CSS changes together via classes or `cssText` to minimize browser reflows.
**Incorrect (multiple reflows):**
```typescript
function updateElementStyles(element: HTMLElement) {
  // Each line triggers a reflow
  element.style.width = '100px'
  element.style.height = '200px'
  element.style.backgroundColor = 'blue'
  element.style.border = '1px solid black'
}
```
**Correct (add class - single reflow):**
```css
/* CSS file */
.highlighted-box {
  width: 100px;
  height: 200px;
  background-color: blue;
  border: 1px solid black;
}
```
```typescript
// JavaScript
function updateElementStyles(element: HTMLElement) {
  element.classList.add('highlighted-box')
}
```
**Correct (change cssText - single reflow):**
```typescript
function updateElementStyles(element: HTMLElement) {
  element.style.cssText = `
    width: 100px;
    height: 200px;
    background-color: blue;
    border: 1px solid black;
  `
}
```
**React example:**
```tsx
// Incorrect: changing styles one by one
function Box({ isHighlighted }: { isHighlighted: boolean }) {
  const ref = useRef<HTMLDivElement>(null)
  useEffect(() => {
    if (ref.current && isHighlighted) {
      ref.current.style.width = '100px'
      ref.current.style.height = '200px'
      ref.current.style.backgroundColor = 'blue'
    }
  }, [isHighlighted])
  return <div ref={ref}>Content</div>
}

// Correct: toggle class
function Box({ isHighlighted }: { isHighlighted: boolean }) {
  return (
    <div className={isHighlighted ? 'highlighted-box' : ''}>
      Content
    </div>
  )
}
```
Prefer CSS classes over inline styles when possible. Classes are cached by the browser and provide better separation of concerns.

View File

@@ -1,80 +0,0 @@
---
title: Cache Repeated Function Calls
impact: MEDIUM
impactDescription: avoid redundant computation
tags: javascript, cache, memoization, performance
---
## Cache Repeated Function Calls
Use a module-level Map to cache function results when the same function is called repeatedly with the same inputs during render.
**Incorrect (redundant computation):**
```typescript
function ProjectList({ projects }: { projects: Project[] }) {
  return (
    <div>
      {projects.map(project => {
        // slugify() called 100+ times for same project names
        const slug = slugify(project.name)
        return <ProjectCard key={project.id} slug={slug} />
      })}
    </div>
  )
}
```
**Correct (cached results):**
```typescript
// Module-level cache
const slugifyCache = new Map<string, string>()

function cachedSlugify(text: string): string {
  if (slugifyCache.has(text)) {
    return slugifyCache.get(text)!
  }
  const result = slugify(text)
  slugifyCache.set(text, result)
  return result
}

function ProjectList({ projects }: { projects: Project[] }) {
  return (
    <div>
      {projects.map(project => {
        // Computed only once per unique project name
        const slug = cachedSlugify(project.name)
        return <ProjectCard key={project.id} slug={slug} />
      })}
    </div>
  )
}
```
**Simpler pattern for single-value functions:**
```typescript
let isLoggedInCache: boolean | null = null

function isLoggedIn(): boolean {
  if (isLoggedInCache !== null) {
    return isLoggedInCache
  }
  isLoggedInCache = document.cookie.includes('auth=')
  return isLoggedInCache
}

// Clear cache when auth changes
function onAuthChange() {
  isLoggedInCache = null
}
```
Use a Map (not a hook) so it works everywhere: utilities, event handlers, not just React components.
Reference: [How we made the Vercel Dashboard twice as fast](https://vercel.com/blog/how-we-made-the-vercel-dashboard-twice-as-fast)

View File

@@ -1,28 +0,0 @@
---
title: Cache Property Access in Loops
impact: LOW-MEDIUM
impactDescription: reduces lookups
tags: javascript, loops, optimization, caching
---
## Cache Property Access in Loops
Cache object property lookups in hot paths.
**Incorrect (3 lookups × N iterations):**
```typescript
for (let i = 0; i < arr.length; i++) {
  process(obj.config.settings.value)
}
```
**Correct (1 lookup total):**
```typescript
const value = obj.config.settings.value
const len = arr.length
for (let i = 0; i < len; i++) {
  process(value)
}
```

View File

@@ -1,70 +0,0 @@
---
title: Cache Storage API Calls
impact: LOW-MEDIUM
impactDescription: reduces expensive I/O
tags: javascript, localStorage, storage, caching, performance
---
## Cache Storage API Calls
`localStorage`, `sessionStorage`, and `document.cookie` are synchronous and expensive. Cache reads in memory.
**Incorrect (reads storage on every call):**
```typescript
function getTheme() {
  return localStorage.getItem('theme') ?? 'light'
}
// Called 10 times = 10 storage reads
```
**Correct (Map cache):**
```typescript
const storageCache = new Map<string, string | null>()

function getLocalStorage(key: string) {
  if (!storageCache.has(key)) {
    storageCache.set(key, localStorage.getItem(key))
  }
  return storageCache.get(key)
}

function setLocalStorage(key: string, value: string) {
  localStorage.setItem(key, value)
  storageCache.set(key, value) // keep cache in sync
}
```
Use a Map (not a hook) so it works everywhere: utilities, event handlers, not just React components.
**Cookie caching:**
```typescript
let cookieCache: Record<string, string> | null = null

function getCookie(name: string) {
  if (!cookieCache) {
    cookieCache = Object.fromEntries(
      document.cookie.split('; ').map(c => c.split('='))
    )
  }
  return cookieCache[name]
}
```
**Important (invalidate on external changes):**
If storage can change externally (another tab, server-set cookies), invalidate cache:
```typescript
window.addEventListener('storage', (e) => {
  if (e.key) storageCache.delete(e.key)
})

document.addEventListener('visibilitychange', () => {
  if (document.visibilityState === 'visible') {
    storageCache.clear()
  }
})
```

View File

@@ -1,32 +0,0 @@
---
title: Combine Multiple Array Iterations
impact: LOW-MEDIUM
impactDescription: reduces iterations
tags: javascript, arrays, loops, performance
---
## Combine Multiple Array Iterations
Multiple `.filter()` or `.map()` calls iterate the array multiple times. Combine into one loop.
**Incorrect (3 iterations):**
```typescript
const admins = users.filter(u => u.isAdmin)
const testers = users.filter(u => u.isTester)
const inactive = users.filter(u => !u.isActive)
```
**Correct (1 iteration):**
```typescript
const admins: User[] = []
const testers: User[] = []
const inactive: User[] = []

for (const user of users) {
  if (user.isAdmin) admins.push(user)
  if (user.isTester) testers.push(user)
  if (!user.isActive) inactive.push(user)
}
```

View File

@@ -1,50 +0,0 @@
---
title: Early Return from Functions
impact: LOW-MEDIUM
impactDescription: avoids unnecessary computation
tags: javascript, functions, optimization, early-return
---
## Early Return from Functions
Return early when result is determined to skip unnecessary processing.
**Incorrect (processes all items even after finding answer):**
```typescript
function validateUsers(users: User[]) {
  let hasError = false
  let errorMessage = ''
  for (const user of users) {
    if (!user.email) {
      hasError = true
      errorMessage = 'Email required'
    }
    if (!user.name) {
      hasError = true
      errorMessage = 'Name required'
    }
    // Continues checking all users even after error found
  }
  return hasError ? { valid: false, error: errorMessage } : { valid: true }
}
```
**Correct (returns immediately on first error):**
```typescript
function validateUsers(users: User[]) {
  for (const user of users) {
    if (!user.email) {
      return { valid: false, error: 'Email required' }
    }
    if (!user.name) {
      return { valid: false, error: 'Name required' }
    }
  }
  return { valid: true }
}
```

View File

@@ -1,45 +0,0 @@
---
title: Hoist RegExp Creation
impact: LOW-MEDIUM
impactDescription: avoids recreation
tags: javascript, regexp, optimization, memoization
---
## Hoist RegExp Creation
Don't create RegExp inside render. Hoist to module scope or memoize with `useMemo()`.
**Incorrect (new RegExp every render):**
```tsx
function Highlighter({ text, query }: Props) {
  const regex = new RegExp(`(${query})`, 'gi')
  const parts = text.split(regex)
  return <>{parts.map((part, i) => ...)}</>
}
```
**Correct (memoize or hoist):**
```tsx
const EMAIL_REGEX = /^[^\s@]+@[^\s@]+\.[^\s@]+$/

function Highlighter({ text, query }: Props) {
  const regex = useMemo(
    () => new RegExp(`(${escapeRegex(query)})`, 'gi'),
    [query]
  )
  const parts = text.split(regex)
  return <>{parts.map((part, i) => ...)}</>
}
```
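The correct example calls `escapeRegex`, which isn't defined in the excerpt; a common implementation escapes all RegExp metacharacters so user input is matched literally:
```tsx
// Assumed helper: escape special characters before embedding input in a RegExp
function escapeRegex(text: string): string {
  return text.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
}
```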
**Warning (global regex has mutable state):**
Global regex (`/g`) has mutable `lastIndex` state:
```typescript
const regex = /foo/g
regex.test('foo') // true, lastIndex = 3
regex.test('foo') // false, lastIndex = 0
```

View File

@@ -1,37 +0,0 @@
---
title: Build Index Maps for Repeated Lookups
impact: LOW-MEDIUM
impactDescription: 1M ops to 2K ops
tags: javascript, map, indexing, optimization, performance
---
## Build Index Maps for Repeated Lookups
Multiple `.find()` calls by the same key should use a Map.
**Incorrect (O(n) per lookup):**
```typescript
function processOrders(orders: Order[], users: User[]) {
  return orders.map(order => ({
    ...order,
    user: users.find(u => u.id === order.userId)
  }))
}
```
**Correct (O(1) per lookup):**
```typescript
function processOrders(orders: Order[], users: User[]) {
  const userById = new Map(users.map(u => [u.id, u]))
  return orders.map(order => ({
    ...order,
    user: userById.get(order.userId)
  }))
}
```
Build map once (O(n)), then all lookups are O(1).
For 1000 orders × 1000 users: 1M ops → 2K ops.

View File

@@ -1,49 +0,0 @@
---
title: Early Length Check for Array Comparisons
impact: MEDIUM-HIGH
impactDescription: avoids expensive operations when lengths differ
tags: javascript, arrays, performance, optimization, comparison
---
## Early Length Check for Array Comparisons
When comparing arrays with expensive operations (sorting, deep equality, serialization), check lengths first. If lengths differ, the arrays cannot be equal.
In real-world applications, this optimization is especially valuable when the comparison runs in hot paths (event handlers, render loops).
**Incorrect (always runs expensive comparison):**
```typescript
function hasChanges(current: string[], original: string[]) {
  // Always sorts and joins, even when lengths differ
  return current.sort().join() !== original.sort().join()
}
```
Two O(n log n) sorts run even when `current.length` is 5 and `original.length` is 100, plus the overhead of joining the arrays and comparing the resulting strings.
**Correct (O(1) length check first):**
```typescript
function hasChanges(current: string[], original: string[]) {
  // Early return if lengths differ
  if (current.length !== original.length) {
    return true
  }
  // Only sort and compare when lengths match
  const currentSorted = current.toSorted()
  const originalSorted = original.toSorted()
  for (let i = 0; i < currentSorted.length; i++) {
    if (currentSorted[i] !== originalSorted[i]) {
      return true
    }
  }
  return false
}
```
This new approach is more efficient because:
- It avoids the overhead of sorting and joining the arrays when lengths differ
- It avoids consuming memory for the joined strings (especially important for large arrays)
- It avoids mutating the original arrays
- It returns early when a difference is found

View File

@@ -1,82 +0,0 @@
---
title: Use Loop for Min/Max Instead of Sort
impact: LOW
impactDescription: O(n) instead of O(n log n)
tags: javascript, arrays, performance, sorting, algorithms
---
## Use Loop for Min/Max Instead of Sort
Finding the smallest or largest element only requires a single pass through the array. Sorting is wasteful and slower.
**Incorrect (O(n log n) - sort to find latest):**
```typescript
interface Project {
  id: string
  name: string
  updatedAt: number
}

function getLatestProject(projects: Project[]) {
  const sorted = [...projects].sort((a, b) => b.updatedAt - a.updatedAt)
  return sorted[0]
}
```
Sorts the entire array just to find the maximum value.
**Incorrect (O(n log n) - sort for oldest and newest):**
```typescript
function getOldestAndNewest(projects: Project[]) {
  const sorted = [...projects].sort((a, b) => a.updatedAt - b.updatedAt)
  return { oldest: sorted[0], newest: sorted[sorted.length - 1] }
}
```
Still sorts unnecessarily when only min/max are needed.
**Correct (O(n) - single loop):**
```typescript
function getLatestProject(projects: Project[]) {
  if (projects.length === 0) return null
  let latest = projects[0]
  for (let i = 1; i < projects.length; i++) {
    if (projects[i].updatedAt > latest.updatedAt) {
      latest = projects[i]
    }
  }
  return latest
}

function getOldestAndNewest(projects: Project[]) {
  if (projects.length === 0) return { oldest: null, newest: null }
  let oldest = projects[0]
  let newest = projects[0]
  for (let i = 1; i < projects.length; i++) {
    if (projects[i].updatedAt < oldest.updatedAt) oldest = projects[i]
    if (projects[i].updatedAt > newest.updatedAt) newest = projects[i]
  }
  return { oldest, newest }
}
```
Single pass through the array, no copying, no sorting.
**Alternative (Math.min/Math.max for small arrays):**
```typescript
const numbers = [5, 2, 8, 1, 9]
const min = Math.min(...numbers)
const max = Math.max(...numbers)
```
This works for small arrays but can be slower for very large arrays due to spread operator limitations. Use the loop approach for reliability.

View File

@@ -1,24 +0,0 @@
---
title: Use Set/Map for O(1) Lookups
impact: LOW-MEDIUM
impactDescription: O(n) to O(1)
tags: javascript, set, map, data-structures, performance
---
## Use Set/Map for O(1) Lookups
Convert arrays to Set/Map for repeated membership checks.
**Incorrect (O(n) per check):**
```typescript
const allowedIds = ['a', 'b', 'c', ...]
items.filter(item => allowedIds.includes(item.id))
```
**Correct (O(1) per check):**
```typescript
const allowedIds = new Set(['a', 'b', 'c', ...])
items.filter(item => allowedIds.has(item.id))
```

View File

@@ -1,57 +0,0 @@
---
title: Use toSorted() Instead of sort() for Immutability
impact: MEDIUM-HIGH
impactDescription: prevents mutation bugs in React state
tags: javascript, arrays, immutability, react, state, mutation
---
## Use toSorted() Instead of sort() for Immutability
`.sort()` mutates the array in place, which can cause bugs with React state and props. Use `.toSorted()` to create a new sorted array without mutation.
**Incorrect (mutates original array):**
```typescript
function UserList({ users }: { users: User[] }) {
  // Mutates the users prop array!
  const sorted = useMemo(
    () => users.sort((a, b) => a.name.localeCompare(b.name)),
    [users]
  )
  return <div>{sorted.map(renderUser)}</div>
}
```
**Correct (creates new array):**
```typescript
function UserList({ users }: { users: User[] }) {
  // Creates new sorted array, original unchanged
  const sorted = useMemo(
    () => users.toSorted((a, b) => a.name.localeCompare(b.name)),
    [users]
  )
  return <div>{sorted.map(renderUser)}</div>
}
```
**Why this matters in React:**
1. Props/state mutations break React's immutability model - React expects props and state to be treated as read-only
2. Causes stale closure bugs - Mutating arrays inside closures (callbacks, effects) can lead to unexpected behavior
**Browser support (fallback for older browsers):**
`.toSorted()` is available in all modern browsers (Chrome 110+, Safari 16+, Firefox 115+, Node.js 20+). For older environments, use spread operator:
```typescript
// Fallback for older browsers
const sorted = [...items].sort((a, b) => a.value - b.value)
```
**Other immutable array methods:**
- `.toSorted()` - immutable sort
- `.toReversed()` - immutable reverse
- `.toSpliced()` - immutable splice
- `.with()` - immutable element replacement

View File

@@ -1,26 +0,0 @@
---
title: Use Activity Component for Show/Hide
impact: MEDIUM
impactDescription: preserves state/DOM
tags: rendering, activity, visibility, state-preservation
---
## Use Activity Component for Show/Hide
Use React's `<Activity>` to preserve state/DOM for expensive components that frequently toggle visibility.
**Usage:**
```tsx
import { Activity } from 'react'

function Dropdown({ isOpen }: Props) {
  return (
    <Activity mode={isOpen ? 'visible' : 'hidden'}>
      <ExpensiveMenu />
    </Activity>
  )
}
```
Avoids expensive re-renders and state loss.

View File

@@ -1,47 +0,0 @@
---
title: Animate SVG Wrapper Instead of SVG Element
impact: LOW
impactDescription: enables hardware acceleration
tags: rendering, svg, css, animation, performance
---
## Animate SVG Wrapper Instead of SVG Element
Many browsers don't have hardware acceleration for CSS3 animations on SVG elements. Wrap SVG in a `<div>` and animate the wrapper instead.
**Incorrect (animating SVG directly - no hardware acceleration):**
```tsx
function LoadingSpinner() {
  return (
    <svg
      className="animate-spin"
      width="24"
      height="24"
      viewBox="0 0 24 24"
    >
      <circle cx="12" cy="12" r="10" stroke="currentColor" />
    </svg>
  )
}
```
**Correct (animating wrapper div - hardware accelerated):**
```tsx
function LoadingSpinner() {
  return (
    <div className="animate-spin">
      <svg
        width="24"
        height="24"
        viewBox="0 0 24 24"
      >
        <circle cx="12" cy="12" r="10" stroke="currentColor" />
      </svg>
    </div>
  )
}
```
This applies to all CSS transforms and transitions (`transform`, `opacity`, `translate`, `scale`, `rotate`). The wrapper div allows browsers to use GPU acceleration for smoother animations.

View File

@@ -1,40 +0,0 @@
---
title: Use Explicit Conditional Rendering
impact: LOW
impactDescription: prevents rendering 0 or NaN
tags: rendering, conditional, jsx, falsy-values
---
## Use Explicit Conditional Rendering
Use explicit ternary operators (`? :`) instead of `&&` for conditional rendering when the condition can be `0`, `NaN`, or other falsy values that render.
**Incorrect (renders "0" when count is 0):**
```tsx
function Badge({ count }: { count: number }) {
  return (
    <div>
      {count && <span className="badge">{count}</span>}
    </div>
  )
}
// When count = 0, renders: <div>0</div>
// When count = 5, renders: <div><span class="badge">5</span></div>
```
**Correct (renders nothing when count is 0):**
```tsx
function Badge({ count }: { count: number }) {
  return (
    <div>
      {count > 0 ? <span className="badge">{count}</span> : null}
    </div>
  )
}
// When count = 0, renders: <div></div>
// When count = 5, renders: <div><span class="badge">5</span></div>
```

View File

@@ -1,38 +0,0 @@
---
title: CSS content-visibility for Long Lists
impact: HIGH
impactDescription: faster initial render
tags: rendering, css, content-visibility, long-lists
---
## CSS content-visibility for Long Lists
Apply `content-visibility: auto` to defer off-screen rendering.
**CSS:**
```css
.message-item {
  content-visibility: auto;
  contain-intrinsic-size: 0 80px;
}
```
**Example:**
```tsx
function MessageList({ messages }: { messages: Message[] }) {
  return (
    <div className="overflow-y-auto h-screen">
      {messages.map(msg => (
        <div key={msg.id} className="message-item">
          <Avatar user={msg.author} />
          <div>{msg.content}</div>
        </div>
      ))}
    </div>
  )
}
```
For 1000 messages, browser skips layout/paint for ~990 off-screen items (10× faster initial render).

View File

@@ -1,46 +0,0 @@
---
title: Hoist Static JSX Elements
impact: LOW
impactDescription: avoids re-creation
tags: rendering, jsx, static, optimization
---
## Hoist Static JSX Elements
Extract static JSX outside components to avoid re-creation.
**Incorrect (recreates element every render):**
```tsx
function LoadingSkeleton() {
  return <div className="animate-pulse h-20 bg-gray-200" />
}

function Container() {
  return (
    <div>
      {loading && <LoadingSkeleton />}
    </div>
  )
}
```
**Correct (reuses same element):**
```tsx
const loadingSkeleton = (
  <div className="animate-pulse h-20 bg-gray-200" />
)

function Container() {
  return (
    <div>
      {loading && loadingSkeleton}
    </div>
  )
}
```
This is especially helpful for large and static SVG nodes, which can be expensive to recreate on every render.
**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, the compiler automatically hoists static JSX elements and optimizes component re-renders, making manual hoisting unnecessary.

View File

@@ -1,82 +0,0 @@
---
title: Prevent Hydration Mismatch Without Flickering
impact: MEDIUM
impactDescription: avoids visual flicker and hydration errors
tags: rendering, ssr, hydration, localStorage, flicker
---
## Prevent Hydration Mismatch Without Flickering
When rendering content that depends on client-side storage (localStorage, cookies), avoid both SSR breakage and post-hydration flickering by injecting a synchronous script that updates the DOM before React hydrates.
**Incorrect (breaks SSR):**
```tsx
function ThemeWrapper({ children }: { children: ReactNode }) {
  // localStorage is not available on server - throws error
  const theme = localStorage.getItem('theme') || 'light'
  return (
    <div className={theme}>
      {children}
    </div>
  )
}
```
Server-side rendering will fail because `localStorage` is undefined.
**Incorrect (visual flickering):**
```tsx
function ThemeWrapper({ children }: { children: ReactNode }) {
  const [theme, setTheme] = useState('light')
  useEffect(() => {
    // Runs after hydration - causes visible flash
    const stored = localStorage.getItem('theme')
    if (stored) {
      setTheme(stored)
    }
  }, [])
  return (
    <div className={theme}>
      {children}
    </div>
  )
}
```
Component first renders with default value (`light`), then updates after hydration, causing a visible flash of incorrect content.
**Correct (no flicker, no hydration mismatch):**
```tsx
function ThemeWrapper({ children }: { children: ReactNode }) {
  return (
    <>
      <div id="theme-wrapper">
        {children}
      </div>
      <script
        dangerouslySetInnerHTML={{
          __html: `
            (function() {
              try {
                var theme = localStorage.getItem('theme') || 'light';
                var el = document.getElementById('theme-wrapper');
                if (el) el.className = theme;
              } catch (e) {}
            })();
          `,
        }}
      />
    </>
  )
}
```
The inline script executes synchronously before showing the element, ensuring the DOM already has the correct value. No flickering, no hydration mismatch.
This pattern is especially useful for theme toggles, user preferences, authentication states, and any client-only data that should render immediately without flashing default values.

View File

@@ -1,28 +0,0 @@
---
title: Optimize SVG Precision
impact: LOW
impactDescription: reduces file size
tags: rendering, svg, optimization, svgo
---
## Optimize SVG Precision
Reduce SVG coordinate precision to decrease file size. The optimal precision depends on the viewBox size, but lower precision is usually worth considering.
**Incorrect (excessive precision):**
```svg
<path d="M 10.293847 20.847362 L 30.938472 40.192837" />
```
**Correct (1 decimal place):**
```svg
<path d="M 10.3 20.8 L 30.9 40.2" />
```
**Automate with SVGO:**
```bash
npx svgo --precision=1 --multipass icon.svg
```

View File

@@ -1,39 +0,0 @@
---
title: Defer State Reads to Usage Point
impact: MEDIUM
impactDescription: avoids unnecessary subscriptions
tags: rerender, searchParams, localStorage, optimization
---
## Defer State Reads to Usage Point
Don't subscribe to dynamic state (searchParams, localStorage) if you only read it inside callbacks.
**Incorrect (subscribes to all searchParams changes):**
```tsx
function ShareButton({ chatId }: { chatId: string }) {
  const searchParams = useSearchParams()
  const handleShare = () => {
    const ref = searchParams.get('ref')
    shareChat(chatId, { ref })
  }
  return <button onClick={handleShare}>Share</button>
}
```
**Correct (reads on demand, no subscription):**
```tsx
function ShareButton({ chatId }: { chatId: string }) {
  const handleShare = () => {
    const params = new URLSearchParams(window.location.search)
    const ref = params.get('ref')
    shareChat(chatId, { ref })
  }
  return <button onClick={handleShare}>Share</button>
}
```

View File

@@ -1,45 +0,0 @@
---
title: Narrow Effect Dependencies
impact: LOW
impactDescription: minimizes effect re-runs
tags: rerender, useEffect, dependencies, optimization
---
## Narrow Effect Dependencies
Specify primitive dependencies instead of objects to minimize effect re-runs.
**Incorrect (re-runs on any user field change):**
```tsx
useEffect(() => {
  console.log(user.id)
}, [user])
```
**Correct (re-runs only when id changes):**
```tsx
useEffect(() => {
  console.log(user.id)
}, [user.id])
```
**For derived state, compute outside effect:**
```tsx
// Incorrect: runs on width=767, 766, 765...
useEffect(() => {
  if (width < 768) {
    enableMobileMode()
  }
}, [width])

// Correct: runs only on boolean transition
const isMobile = width < 768
useEffect(() => {
  if (isMobile) {
    enableMobileMode()
  }
}, [isMobile])
```

View File

@@ -1,29 +0,0 @@
---
title: Subscribe to Derived State
impact: MEDIUM
impactDescription: reduces re-render frequency
tags: rerender, derived-state, media-query, optimization
---
## Subscribe to Derived State
Subscribe to derived boolean state instead of continuous values to reduce re-render frequency.
**Incorrect (re-renders on every pixel change):**
```tsx
function Sidebar() {
  const width = useWindowWidth() // updates continuously
  const isMobile = width < 768
  return <nav className={isMobile ? 'mobile' : 'desktop'} />
}
```
**Correct (re-renders only when boolean changes):**
```tsx
function Sidebar() {
  const isMobile = useMediaQuery('(max-width: 767px)')
  return <nav className={isMobile ? 'mobile' : 'desktop'} />
}
```
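`useMediaQuery` isn't defined in the excerpt; a minimal sketch using `matchMedia` with `useSyncExternalStore`, so the component re-renders only when the boolean match flips:
```tsx
import { useSyncExternalStore } from 'react'

// Assumed implementation: subscribe to a media query and return its
// boolean match; renders are triggered only on true/false transitions.
function useMediaQuery(query: string): boolean {
  return useSyncExternalStore(
    (onStoreChange) => {
      const mql = window.matchMedia(query)
      mql.addEventListener('change', onStoreChange)
      return () => mql.removeEventListener('change', onStoreChange)
    },
    () => window.matchMedia(query).matches,
    () => false // server snapshot: no viewport on the server
  )
}
```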

View File

@@ -1,74 +0,0 @@
---
title: Use Functional setState Updates
impact: MEDIUM
impactDescription: prevents stale closures and unnecessary callback recreations
tags: react, hooks, useState, useCallback, callbacks, closures
---
## Use Functional setState Updates
When updating state based on the current state value, use the functional update form of setState instead of directly referencing the state variable. This prevents stale closures, eliminates unnecessary dependencies, and creates stable callback references.
**Incorrect (requires state as dependency):**
```tsx
function TodoList() {
  const [items, setItems] = useState(initialItems)

  // Callback must depend on items, recreated on every items change
  const addItems = useCallback((newItems: Item[]) => {
    setItems([...items, ...newItems])
  }, [items]) // ❌ items dependency causes recreations

  // Risk of stale closure if dependency is forgotten
  const removeItem = useCallback((id: string) => {
    setItems(items.filter(item => item.id !== id))
  }, []) // ❌ Missing items dependency - will use stale items!

  return <ItemsEditor items={items} onAdd={addItems} onRemove={removeItem} />
}
```
The first callback is recreated every time `items` changes, which can cause child components to re-render unnecessarily. The second callback has a stale closure bug—it will always reference the initial `items` value.
**Correct (stable callbacks, no stale closures):**
```tsx
function TodoList() {
  const [items, setItems] = useState(initialItems)

  // Stable callback, never recreated
  const addItems = useCallback((newItems: Item[]) => {
    setItems(curr => [...curr, ...newItems])
  }, []) // ✅ No dependencies needed

  // Always uses latest state, no stale closure risk
  const removeItem = useCallback((id: string) => {
    setItems(curr => curr.filter(item => item.id !== id))
  }, []) // ✅ Safe and stable

  return <ItemsEditor items={items} onAdd={addItems} onRemove={removeItem} />
}
```
**Benefits:**
1. **Stable callback references** - Callbacks don't need to be recreated when state changes
2. **No stale closures** - Always operates on the latest state value
3. **Fewer dependencies** - Simplifies dependency arrays and reduces memory leaks
4. **Prevents bugs** - Eliminates the most common source of React closure bugs
**When to use functional updates:**
- Any setState that depends on the current state value
- Inside useCallback/useMemo when state is needed
- Event handlers that reference state
- Async operations that update state
**When direct updates are fine:**
- Setting state to a static value: `setCount(0)`
- Setting state from props/arguments only: `setName(newName)`
- State doesn't depend on previous value
**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, the compiler can automatically optimize some cases, but functional updates are still recommended for correctness and to prevent stale closure bugs.

View File

@@ -1,58 +0,0 @@
---
title: Use Lazy State Initialization
impact: MEDIUM
impactDescription: wasted computation on every render
tags: react, hooks, useState, performance, initialization
---
## Use Lazy State Initialization
Pass a function to `useState` for expensive initial values. Without the function form, the initializer runs on every render even though the value is only used once.
**Incorrect (runs on every render):**
```tsx
function FilteredList({ items }: { items: Item[] }) {
  // buildSearchIndex() runs on EVERY render, even after initialization
  const [searchIndex, setSearchIndex] = useState(buildSearchIndex(items))
  const [query, setQuery] = useState('')
  // When query changes, buildSearchIndex runs again unnecessarily
  return <SearchResults index={searchIndex} query={query} />
}

function UserProfile() {
  // JSON.parse runs on every render
  const [settings, setSettings] = useState(
    JSON.parse(localStorage.getItem('settings') || '{}')
  )
  return <SettingsForm settings={settings} onChange={setSettings} />
}
```
**Correct (runs only once):**
```tsx
function FilteredList({ items }: { items: Item[] }) {
  // buildSearchIndex() runs ONLY on initial render
  const [searchIndex, setSearchIndex] = useState(() => buildSearchIndex(items))
  const [query, setQuery] = useState('')
  return <SearchResults index={searchIndex} query={query} />
}

function UserProfile() {
  // JSON.parse runs only on initial render
  const [settings, setSettings] = useState(() => {
    const stored = localStorage.getItem('settings')
    return stored ? JSON.parse(stored) : {}
  })
  return <SettingsForm settings={settings} onChange={setSettings} />
}
```
Use lazy initialization when computing initial values from localStorage/sessionStorage, building data structures (indexes, maps), reading from the DOM, or performing heavy transformations.
For simple primitives (`useState(0)`), direct references (`useState(props.value)`), or cheap literals (`useState({})`), the function form is unnecessary.

View File

@@ -1,44 +0,0 @@
---
title: Extract to Memoized Components
impact: MEDIUM
impactDescription: enables early returns
tags: rerender, memo, useMemo, optimization
---
## Extract to Memoized Components
Extract expensive work into memoized components to enable early returns before computation.
**Incorrect (computes avatar even when loading):**
```tsx
function Profile({ user, loading }: Props) {
  const avatar = useMemo(() => {
    const id = computeAvatarId(user)
    return <Avatar id={id} />
  }, [user])
  if (loading) return <Skeleton />
  return <div>{avatar}</div>
}
```
**Correct (skips computation when loading):**
```tsx
const UserAvatar = memo(function UserAvatar({ user }: { user: User }) {
  const id = useMemo(() => computeAvatarId(user), [user])
  return <Avatar id={id} />
})

function Profile({ user, loading }: Props) {
  if (loading) return <Skeleton />
  return (
    <div>
      <UserAvatar user={user} />
    </div>
  )
}
```
**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, manual memoization with `memo()` and `useMemo()` is not necessary. The compiler automatically optimizes re-renders.

View File

@@ -1,40 +0,0 @@
---
title: Use Transitions for Non-Urgent Updates
impact: MEDIUM
impactDescription: maintains UI responsiveness
tags: rerender, transitions, startTransition, performance
---
## Use Transitions for Non-Urgent Updates
Mark frequent, non-urgent state updates as transitions to maintain UI responsiveness.
**Incorrect (blocks UI on every scroll):**
```tsx
function ScrollTracker() {
const [scrollY, setScrollY] = useState(0)
useEffect(() => {
const handler = () => setScrollY(window.scrollY)
window.addEventListener('scroll', handler, { passive: true })
return () => window.removeEventListener('scroll', handler)
}, [])
}
```
**Correct (non-blocking updates):**
```tsx
import { startTransition } from 'react'
function ScrollTracker() {
const [scrollY, setScrollY] = useState(0)
useEffect(() => {
const handler = () => {
startTransition(() => setScrollY(window.scrollY))
}
window.addEventListener('scroll', handler, { passive: true })
return () => window.removeEventListener('scroll', handler)
}, [])
}
```
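Relatedly, when the UI should also show feedback while the transition is pending, the `useTransition` hook provides the same scheduling plus an `isPending` flag. A minimal sketch, assuming a hypothetical `Results` component:
```tsx
import { useState, useTransition } from 'react'

function FilteredSearch() {
  const [query, setQuery] = useState('')
  const [isPending, startTransition] = useTransition()

  return (
    <>
      <input
        onChange={(e) => {
          const value = e.target.value
          // Non-urgent: filtering can lag without blocking keystrokes
          startTransition(() => setQuery(value))
        }}
      />
      {isPending ? <p>Updating…</p> : <Results query={query} />}
    </>
  )
}
```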

View File

@@ -1,73 +0,0 @@
---
title: Use after() for Non-Blocking Operations
impact: MEDIUM
impactDescription: faster response times
tags: server, async, logging, analytics, side-effects
---
## Use after() for Non-Blocking Operations
Use Next.js's `after()` to schedule work that should execute after a response is sent. This prevents logging, analytics, and other side effects from blocking the response.
**Incorrect (blocks response):**
```tsx
import { logUserAction } from '@/app/utils'
export async function POST(request: Request) {
// Perform mutation
await updateDatabase(request)
// Logging blocks the response
const userAgent = request.headers.get('user-agent') || 'unknown'
await logUserAction({ userAgent })
return new Response(JSON.stringify({ status: 'success' }), {
status: 200,
headers: { 'Content-Type': 'application/json' }
})
}
```
**Correct (non-blocking):**
```tsx
import { after } from 'next/server'
import { headers, cookies } from 'next/headers'
import { logUserAction } from '@/app/utils'
export async function POST(request: Request) {
// Perform mutation
await updateDatabase(request)
// Log after response is sent
after(async () => {
const userAgent = (await headers()).get('user-agent') || 'unknown'
const sessionCookie = (await cookies()).get('session-id')?.value || 'anonymous'
logUserAction({ sessionCookie, userAgent })
})
return new Response(JSON.stringify({ status: 'success' }), {
status: 200,
headers: { 'Content-Type': 'application/json' }
})
}
```
The response is sent immediately while logging happens in the background.
**Common use cases:**
- Analytics tracking
- Audit logging
- Sending notifications
- Cache invalidation
- Cleanup tasks
**Important notes:**
- `after()` runs even if the response fails or redirects
- Works in Server Actions, Route Handlers, and Server Components
Reference: [https://nextjs.org/docs/app/api-reference/functions/after](https://nextjs.org/docs/app/api-reference/functions/after)

View File

@@ -1,41 +0,0 @@
---
title: Cross-Request LRU Caching
impact: HIGH
impactDescription: caches across requests
tags: server, cache, lru, cross-request
---
## Cross-Request LRU Caching
`React.cache()` only works within one request. For data shared across sequential requests (user clicks button A then button B), use an LRU cache.
**Implementation:**
```typescript
import { LRUCache } from 'lru-cache'
const cache = new LRUCache<string, any>({
max: 1000,
ttl: 5 * 60 * 1000 // 5 minutes
})
export async function getUser(id: string) {
const cached = cache.get(id)
if (cached) return cached
const user = await db.user.findUnique({ where: { id } })
cache.set(id, user)
return user
}
// Request 1: DB query, result cached
// Request 2: cache hit, no DB query
```
Use when sequential user actions hit multiple endpoints needing the same data within seconds.
**With Vercel's [Fluid Compute](https://vercel.com/docs/fluid-compute):** LRU caching is especially effective because multiple concurrent requests can share the same function instance and cache. This means the cache persists across requests without needing external storage like Redis.
**In traditional serverless:** Each invocation runs in isolation, so consider Redis for cross-process caching.
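A minimal sketch of that Redis fallback, using `ioredis` and assuming a JSON-serializable user record (the bare `db` handle follows the convention of the LRU example above):
```typescript
import Redis from 'ioredis'

const redis = new Redis(process.env.REDIS_URL!)

export async function getUser(id: string) {
  // Shared across processes, unlike an in-memory LRU
  const cached = await redis.get(`user:${id}`)
  if (cached) return JSON.parse(cached)

  const user = await db.user.findUnique({ where: { id } })
  // 'EX' sets a TTL in seconds, mirroring the LRU's 5-minute ttl
  await redis.set(`user:${id}`, JSON.stringify(user), 'EX', 300)
  return user
}
```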
Reference: [https://github.com/isaacs/node-lru-cache](https://github.com/isaacs/node-lru-cache)

View File

@@ -1,26 +0,0 @@
---
title: Per-Request Deduplication with React.cache()
impact: MEDIUM
impactDescription: deduplicates within request
tags: server, cache, react-cache, deduplication
---
## Per-Request Deduplication with React.cache()
Use `React.cache()` for server-side request deduplication. Authentication and database queries benefit most.
**Usage:**
```typescript
import { cache } from 'react'
export const getCurrentUser = cache(async () => {
const session = await auth()
if (!session?.user?.id) return null
return await db.user.findUnique({
where: { id: session.user.id }
})
})
```
Within a single request, multiple calls to `getCurrentUser()` execute the query only once.
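To make the deduplication concrete, a minimal sketch with two hypothetical server components rendered in the same request:
```tsx
async function Header() {
  const user = await getCurrentUser() // runs the query
  return <div>{user?.name}</div>
}

async function AccountMenu() {
  const user = await getCurrentUser() // deduplicated: no second query
  return <div>{user?.email}</div>
}

export default function Page() {
  // One database query total for this request
  return (
    <>
      <Header />
      <AccountMenu />
    </>
  )
}
```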

View File

@@ -1,79 +0,0 @@
---
title: Parallel Data Fetching with Component Composition
impact: CRITICAL
impactDescription: eliminates server-side waterfalls
tags: server, rsc, parallel-fetching, composition
---
## Parallel Data Fetching with Component Composition
React Server Components render parent-first, so a child cannot start fetching until its parent's awaits have resolved. Restructure with composition so sibling components fetch in parallel.
**Incorrect (Sidebar waits for Page's fetch to complete):**
```tsx
export default async function Page() {
const header = await fetchHeader()
return (
<div>
<div>{header}</div>
<Sidebar />
</div>
)
}
async function Sidebar() {
const items = await fetchSidebarItems()
return <nav>{items.map(renderItem)}</nav>
}
```
**Correct (both fetch simultaneously):**
```tsx
async function Header() {
const data = await fetchHeader()
return <div>{data}</div>
}
async function Sidebar() {
const items = await fetchSidebarItems()
return <nav>{items.map(renderItem)}</nav>
}
export default function Page() {
return (
<div>
<Header />
<Sidebar />
</div>
)
}
```
**Alternative with children prop:**
```tsx
async function Layout({ children }: { children: ReactNode }) {
const header = await fetchHeader()
return (
<div>
<div>{header}</div>
{children}
</div>
)
}
async function Sidebar() {
const items = await fetchSidebarItems()
return <nav>{items.map(renderItem)}</nav>
}
export default function Page() {
return (
<Layout>
<Sidebar />
</Layout>
)
}
```

View File

@@ -1,38 +0,0 @@
---
title: Minimize Serialization at RSC Boundaries
impact: HIGH
impactDescription: reduces data transfer size
tags: server, rsc, serialization, props
---
## Minimize Serialization at RSC Boundaries
The React Server/Client boundary serializes every prop passed to a Client Component and embeds the result in the HTML response and in subsequent RSC payloads. This serialized data directly impacts page weight and load time, so **size matters a lot**. Only pass fields that the client actually uses.
**Incorrect (serializes all 50 fields):**
```tsx
async function Page() {
const user = await fetchUser() // 50 fields
return <Profile user={user} />
}
'use client'
function Profile({ user }: { user: User }) {
return <div>{user.name}</div> // uses 1 field
}
```
**Correct (serializes only 1 field):**
```tsx
async function Page() {
const user = await fetchUser()
return <Profile name={user.name} />
}
'use client'
function Profile({ name }: { name: string }) {
return <div>{name}</div>
}
```

View File

@@ -1,9 +1,6 @@
# Ignore everything by default, selectively add things to context
*
# Documentation (for embeddings/search)
!docs/
# Platform - Libs
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
@@ -19,7 +16,6 @@
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env
!autogpt_platform/backend/gen_prisma_types_stub.py
# Platform - Market
!autogpt_platform/market/market/

View File

@@ -74,13 +74,13 @@ jobs:
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate && poetry run gen-prisma-stub
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22"
node-version: "21"
- name: Enable corepack
run: corepack enable

View File

@@ -44,12 +44,6 @@ jobs:
with:
fetch-depth: 1
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@v1.3.1
with:
large-packages: false # slow
docker-images: false # limited benefit
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
- name: Set up Python
uses: actions/setup-python@v5
@@ -90,13 +84,13 @@ jobs:
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate && poetry run gen-prisma-stub
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22"
node-version: "21"
- name: Enable corepack
run: corepack enable

View File

@@ -72,13 +72,13 @@ jobs:
- name: Generate Prisma Client
working-directory: autogpt_platform/backend
run: poetry run prisma generate && poetry run gen-prisma-stub
run: poetry run prisma generate
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22"
node-version: "21"
- name: Enable corepack
run: corepack enable
@@ -108,16 +108,6 @@ jobs:
# run: pnpm playwright install --with-deps chromium
# Docker setup for development environment
- name: Free up disk space
run: |
# Remove large unused tools to free disk space for Docker builds
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker system prune -af
df -h
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@@ -309,4 +299,4 @@ jobs:
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"
echo "🌐 Frontend server: pnpm dev (port 3000)"

View File

@@ -134,7 +134,7 @@ jobs:
run: poetry install
- name: Generate Prisma Client
run: poetry run prisma generate && poetry run gen-prisma-stub
run: poetry run prisma generate
- id: supabase
name: Start Supabase
@@ -176,7 +176,7 @@ jobs:
}
- name: Run Database Migrations
run: poetry run prisma migrate deploy
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}

View File

@@ -11,11 +11,6 @@ on:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
@@ -152,14 +147,6 @@ jobs:
run: |
cp ../.env.default ../.env
- name: Copy backend .env and set OpenAI API key
run: |
cp ../backend/.env.default ../backend/.env
echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
env:
# Used by E2E test data script to generate embeddings for approved store agents
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@@ -235,25 +222,13 @@ jobs:
- name: Run Playwright tests
run: pnpm test:no-build
continue-on-error: false
- name: Upload Playwright report
if: always()
- name: Upload Playwright artifacts
if: failure()
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: playwright-report
if-no-files-found: ignore
retention-days: 3
- name: Upload Playwright test results
if: always()
uses: actions/upload-artifact@v4
with:
name: playwright-test-results
path: test-results
if-no-files-found: ignore
retention-days: 3
- name: Print Final Docker Compose logs
if: always()

View File

@@ -12,10 +12,6 @@ on:
- "autogpt_platform/**"
merge_group:
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
shell: bash

View File

@@ -11,7 +11,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10
- uses: actions/stale@v9
with:
# operations-per-run: 5000
stale-issue-message: >

View File

@@ -61,6 +61,6 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v6
- uses: actions/labeler@v5
with:
sync-labels: true

View File

@@ -1,4 +1,4 @@
.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend load-store-agents
.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend
# Run just Supabase + Redis + RabbitMQ
start-core:
@@ -6,14 +6,12 @@ start-core:
# Stop core services
stop-core:
docker compose stop
docker compose stop deps
reset-db:
docker compose stop db
rm -rf db/docker/volumes/db/data
cd backend && poetry run prisma migrate deploy
cd backend && poetry run prisma generate
cd backend && poetry run gen-prisma-stub
# View logs for core services
logs-core:
@@ -35,7 +33,6 @@ init-env:
migrate:
cd backend && poetry run prisma migrate deploy
cd backend && poetry run prisma generate
cd backend && poetry run gen-prisma-stub
run-backend:
cd backend && poetry run app
@@ -45,10 +42,7 @@ run-frontend:
test-data:
cd backend && poetry run python test/test_data_creator.py
load-store-agents:
cd backend && poetry run load-store-agents
help:
@echo "Usage: make <target>"
@echo "Targets:"
@@ -60,5 +54,4 @@ help:
@echo " migrate - Run backend database migrations"
@echo " run-backend - Run the backend FastAPI server"
@echo " run-frontend - Run the frontend Next.js development server"
@echo " test-data - Run the test data creator"
@echo " load-store-agents - Load store agents from agents/ folder into test database"
@echo " test-data - Run the test data creator"

View File

@@ -57,9 +57,6 @@ class APIKeySmith:
def hash_key(self, raw_key: str) -> tuple[str, str]:
"""Migrate a legacy hash to secure hash format."""
if not raw_key.startswith(self.PREFIX):
raise ValueError("Key without 'agpt_' prefix would fail validation")
salt = self._generate_salt()
hash = self._hash_key_with_salt(raw_key, salt)
return hash, salt.hex()

View File

@@ -1,25 +1,29 @@
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from .jwt_utils import bearer_jwt_auth
def add_auth_responses_to_openapi(app: FastAPI) -> None:
"""
Patch a FastAPI instance's `openapi()` method to add 401 responses
Set up custom OpenAPI schema generation that adds 401 responses
to all authenticated endpoints.
This is needed when using HTTPBearer with auto_error=False to get proper
401 responses instead of 403, but FastAPI only automatically adds security
responses when auto_error=True.
"""
# Wrap current method to allow stacking OpenAPI schema modifiers like this
wrapped_openapi = app.openapi
def custom_openapi():
if app.openapi_schema:
return app.openapi_schema
openapi_schema = wrapped_openapi()
openapi_schema = get_openapi(
title=app.title,
version=app.version,
description=app.description,
routes=app.routes,
)
# Add 401 response to all endpoints that have security requirements
for path, methods in openapi_schema["paths"].items():

View File

@@ -17,6 +17,41 @@ DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME
DIRECT_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
PRISMA_SCHEMA="postgres/schema.prisma"
# SQLAlchemy Configuration (for gradual migration from Prisma)
# Set to true to enable SQLAlchemy alongside Prisma (both ORMs coexist during migration)
ENABLE_SQLALCHEMY=false
# Connection Pool Configuration
# IMPORTANT: With 6 backend processes, total connections = 6 × (POOL_SIZE + MAX_OVERFLOW)
# Must stay under PostgreSQL max_connections (default: 100)
#
# Environment-specific recommendations:
# Development: POOL_SIZE=2-3, MAX_OVERFLOW=1-2 (lightweight, fast startup)
# Test/CI: POOL_SIZE=2, MAX_OVERFLOW=1 (minimal resources, parallel test safety)
# Production: POOL_SIZE=10-20, MAX_OVERFLOW=5-10 (handle real traffic and bursts)
#
# Default values below are suitable for production use:
SQLALCHEMY_POOL_SIZE=10
SQLALCHEMY_MAX_OVERFLOW=5
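# Worked example: with 6 backend processes and these defaults,
# total = 6 × (10 + 5) = 90 connections, under the default max_connections of 100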
# Timeout Configuration
# POOL_TIMEOUT: How long to wait for an available connection from the pool (when all connections busy)
# CONNECT_TIMEOUT: How long to wait when establishing a NEW connection to PostgreSQL
#
# Environment-specific recommendations:
# Development: POOL_TIMEOUT=10-30s, CONNECT_TIMEOUT=5-10s
# Test/CI: POOL_TIMEOUT=5-10s, CONNECT_TIMEOUT=5-10s (fail fast)
# Production: POOL_TIMEOUT=30s, CONNECT_TIMEOUT=10-15s
#
# Default values below are suitable for production use:
SQLALCHEMY_POOL_TIMEOUT=30
SQLALCHEMY_CONNECT_TIMEOUT=10
# SQL Query Logging
# Set to true to log ALL SQL statements (very verbose, useful for debugging)
# Should always be false in production
SQLALCHEMY_ECHO=false
## ===== REQUIRED SERVICE CREDENTIALS ===== ##
# Redis Configuration
REDIS_HOST=localhost
@@ -58,13 +93,6 @@ V0_API_KEY=
OPEN_ROUTER_API_KEY=
NVIDIA_API_KEY=
# Langfuse Prompt Management
# Used for managing the CoPilot system prompt externally
# Get credentials from https://cloud.langfuse.com or your self-hosted instance
LANGFUSE_PUBLIC_KEY=
LANGFUSE_SECRET_KEY=
LANGFUSE_HOST=https://cloud.langfuse.com
# OAuth Credentials
# For the OAuth callback URL, use <your_frontend_url>/auth/integrations/oauth_callback,
# e.g. http://localhost:3000/auth/integrations/oauth_callback

View File

@@ -18,4 +18,3 @@ load-tests/results/
load-tests/*.json
load-tests/*.log
load-tests/node_modules/*
migrations/*/rollback*.sql

View File

@@ -48,8 +48,7 @@ RUN poetry install --no-ansi --no-root
# Generate Prisma client
COPY autogpt_platform/backend/schema.prisma ./
COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
RUN poetry run prisma generate && poetry run gen-prisma-stub
RUN poetry run prisma generate
FROM debian:13-slim AS server_dependencies
@@ -100,7 +99,6 @@ COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migration
FROM server_dependencies AS server
COPY autogpt_platform/backend /app/autogpt_platform/backend
COPY docs /app/docs
RUN poetry install --no-ansi --only-root
ENV PORT=8000

View File

@@ -108,7 +108,7 @@ import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot
from backend.api.features.myroute import router
from backend.server.v2.myroute import router
app = fastapi.FastAPI()
app.include_router(router)
@@ -149,7 +149,7 @@ These provide the easiest way to set up authentication mocking in test modules:
import fastapi
import fastapi.testclient
import pytest
from backend.api.features.myroute import router
from backend.server.v2.myroute import router
app = fastapi.FastAPI()
app.include_router(router)

View File

@@ -1,242 +0,0 @@
listing_id,storeListingVersionId,slug,agent_name,agent_video,agent_image,featured,sub_heading,description,categories,useForOnboarding,is_available
6e60a900-9d7d-490e-9af2-a194827ed632,d85882b8-633f-44ce-a315-c20a8c123d19,flux-ai-image-generator,Flux AI Image Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ca154dd1-140e-454c-91bd-2d8a00de3f08.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/577d995d-bc38-40a9-a23f-1f30f5774bdb.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/415db1b7-115c-43ab-bd6c-4e9f7ef95be1.jpg""]",false,Transform ideas into breathtaking images,"Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!","[""creative""]",false,true
f11fc6e9-6166-4676-ac5d-f07127b270c1,c775f60d-b99f-418b-8fe0-53172258c3ce,youtube-transcription-scraper,YouTube Transcription Scraper,https://youtu.be/H8S3pU68lGE,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/65bce54b-0124-4b0d-9e3e-f9b89d0dc99e.jpg""]",false,Fetch the transcriptions from the most popular YouTube videos in your chosen topic,"Effortlessly gather transcriptions from multiple YouTube videos with this agent. It scrapes and compiles video transcripts into a clean, organized list, making it easy to extract insights, quotes, or content from various sources in one go. Ideal for researchers, content creators, and marketers looking to quickly analyze or repurpose video content.","[""writing""]",false,true
17908889-b599-4010-8e4f-bed19b8f3446,6e16e65a-ad34-4108-b4fd-4a23fced5ea2,business-ownerceo-finder,Decision Maker Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/1020d94e-b6a2-4fa7-bbdf-2c218b0de563.jpg""]",false,Contact CEOs today,"Find the key decision-makers you need, fast.
This agent identifies business owners or CEOs of local companies in any area you choose. Simply enter what kind of businesses you're looking for and where, and it will:
* Search the area and gather public information
* Return names, roles, and contact details when available
* Provide smart Google search suggestions if details aren't found
Perfect for:
* B2B sales teams seeking verified leads
* Recruiters sourcing local talent
* Researchers looking to connect with business leaders
Save hours of manual searching and get straight to the people who matter most.","[""business""]",true,true
72beca1d-45ea-4403-a7ce-e2af168ee428,415b7352-0dc6-4214-9d87-0ad3751b711d,smart-meeting-brief,Smart Meeting Prep,https://youtu.be/9ydZR2hkxaY,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2f116ce1-63ae-4d39-a5cd-f514defc2b97.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0a71a60a-2263-4f12-9836-9c76ab49f155.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/95327695-9184-403c-907a-a9d3bdafa6a5.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2bc77788-790b-47d4-8a61-ce97b695e9f5.png""]",true,Business meeting briefings delivered daily,"Never walk into a meeting unprepared again. Every day at 4 pm, the Smart Meeting Prep Agent scans your calendar for tomorrow's external meetings. It reviews your past email exchanges, researches each participant's background and role, and compiles the insights into a concise briefing, so you can close your workday ready for tomorrow's calls.
How It Works
1. At 4 pm, the agent scans your calendar and identifies external meetings scheduled for the next day.
2. It reviews recent email threads with each participant to surface key relationship history and communication context.
3. It conducts online research to gather publicly available information on roles, company backgrounds, and relevant professional data.
4. It produces a unified briefing for each participant, including past exchange highlights, profile notes, and strategic conversation points.","[""personal""]",true,true
9fa5697a-617b-4fae-aea0-7dbbed279976,b8ceb480-a7a2-4c90-8513-181a49f7071f,automated-support-ai,Automated Support Agent,https://youtu.be/nBMfu_5sgDA,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ed56febc-2205-4179-9e7e-505d8500b66c.png""]",true,Automate up to 80 percent of inbound support emails,"Overview:
Support teams spend countless hours on basic tickets. This agent automates repetitive customer support tasks. It reads incoming requests, researches your knowledge base, and responds automatically when confident. When unsure, it escalates to a human for final resolution.
How it Works:
New support emails are routed to the agent.
The agent checks internal documentation for answers.
It measures confidence in the answer found and either replies directly or escalates to a human.
Business Value:
Automating the easy 80 percent of support tickets allows your team to focus on high-value, complex customer issues, improving efficiency and response times.","[""business""]",false,true
2bdac92b-a12c-4131-bb46-0e3b89f61413,31daf49d-31d3-476b-aa4c-099abc59b458,unspirational-poster-maker,Unspirational Poster Maker,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a490dac-27e5-405f-a4c4-8d1c55b85060.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d343fbb5-478c-4e38-94df-4337293b61f1.jpg""]",false,Because adulting is hard,"This witty AI agent generates hilariously relatable ""motivational"" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clichés and embrace our collective struggles to ""get it together."" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.","[""creative""]",false,true
9adf005e-2854-4cc7-98cf-f7103b92a7b7,a03b0d8c-4751-43d6-a54e-c3b7856ba4e3,ai-shortform-video-generator-create-viral-ready-content,AI Video Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/8d2670b9-fea5-4966-a597-0a4511bffdc3.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/aabe8aec-0110-4ce7-a259-4f86fe8fe07d.png""]",false,Create Viral-Ready Shorts Content in Seconds,"OVERVIEW
Transform any trending headline or broad topic into a polished, vertical short-form video in a single run.
The agent automates research, scriptwriting, metadata creation, and Revid.ai rendering, returning one ready-to-publish MP4 plus its title, script and hashtags.
HOW IT WORKS
1. Input a topic or an exact news headline.
2. The agent fetches live search results and selects the most engaging related story.
3. Key facts are summarised into concise research notes.
4. Claude writes a 30–35 second script with visual cues, a three-second hook, tension loops, and a call-to-action.
5. GPT-4o generates an eye-catching title and one or two discoverability hashtags.
6. The script is sent to a state-of-the-art AI video generator to render a single 9:16 MP4 (default: 720 p, 30 fps, voice “Brian”, style “movingImage”, music “Bladerunner 2049”).
– All voice, style and resolution settings can be adjusted in the Builder before you press ""Run"".
7. Output delivered: Title, Script, Hashtags, Video URL.
KEY USE CASES
- Broad-topic explainers (e.g. “Artificial Intelligence” or “Climate Tech”).
- Real-time newsjacking with a specific breaking headline.
- Product-launch spotlights and quick event recaps while interest is high.
BUSINESS VALUE
- One-click speed: from idea to finished video in minutes.
- Consistent brand look: Revid presets keep voice, style and aspect ratio on spec.
- No-code workflow: marketers create social video without design or development queues.
- Cloud convenience: Auto-GPT Cloud users are pre-configured with all required keys.
Self-hosted users simply add OpenAI, Anthropic, Perplexity (OpenRouter/Jina) and Revid keys once.
IMPORTANT NOTES
- The agent outputs exactly one video per execution. Run it again for additional shorts.
- Video rendering time varies; AI-generated footage may take several minutes.","[""writing""]",false,true
864e48ef-fee5-42c1-b6a4-2ae139db9fc1,55d40473-0f31-4ada-9e40-d3a7139fcbd4,automated-blog-writer,Automated SEO Blog Writer,https://youtu.be/nKcDCbDVobs,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2dd5f95b-5b30-4bf8-a11b-bac776c5141a.jpg""]",true,"Automate research, writing, and publishing for high-ranking blog posts","Scale your blog with a fully automated content engine. The Automated SEO Blog Writer learns your brand voice, finds high-demand keywords, and creates SEO-optimized articles that attract organic traffic and boost visibility.
How it works:
1. Share your pitch, website, and values.
2. The agent studies your site and uncovers proven SEO opportunities.
3. It spends two hours researching and drafting each post.
4. You set the cadence—publishing runs on autopilot.
Business value: Consistently publish research-backed, optimized posts that build domain authority, rankings, and thought leadership while you focus on what matters most.
Use cases:
• Founders: Keep your blog active with no time drain.
• Agencies: Deliver scalable SEO content for clients.
• Strategists: Automate execution, focus on strategy.
• Marketers: Drive steady organic growth.
• Local businesses: Capture nearby search traffic.","[""writing""]",false,true
6046f42e-eb84-406f-bae0-8e052064a4fa,a548e507-09a7-4b30-909c-f63fcda10fff,lead-finder-local-businesses,Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/abd6605f-d5f8-426b-af36-052e8ba5044f.webp""]",false,Auto-Prospect Like a Pro,"Turbo-charge your local lead generation with the AutoGPT Marketplace's top Google Maps prospecting agent. “Lead Finder: Local Businesses” delivers verified, ready-to-contact prospects in any niche and city—so you can focus on closing, not searching.
**WHAT IT DOES**
• Searches Google Maps via the official API (no scraping)
• Prompts like “dentists in Chicago” or “coffee shops near me”
• Returns: Name, Website, Rating, Reviews, **Phone & Address**
• Exports instantly to your CRM, sheet, or outreach workflow
**WHY YOU'LL LOVE IT**
✓ Hyper-targeted leads in minutes
✓ Unlimited searches & locations
✓ Zero CAPTCHAs or IP blocks
✓ Works on AutoGPT Cloud or self-hosted (with your API key)
✓ Cut prospecting time by 90%
**PERFECT FOR**
— Marketers & PPC agencies
— SEO consultants & designers
— SaaS founders & sales teams
Stop scrolling directories—start filling your pipeline. Start now and let AI prospect while you profit.
→ Click *Add to Library* and own your market today.","[""business""]",true,true
f623c862-24e9-44fc-8ce8-d8282bb51ad2,eafa21d3-bf14-4f63-a97f-a5ee41df83b3,linkedin-post-generator,LinkedIn Post Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/297f6a8e-81a8-43e2-b106-c7ad4a5662df.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/fceebdc1-aef6-4000-97fc-4ef587f56bda.png""]",false,Auto-craft LinkedIn gold,"Create research-driven, high-impact LinkedIn posts in minutes. This agent searches YouTube for the best videos on your chosen topic, pulls their transcripts, and distils the most valuable insights into a polished post ready for your company page or personal feed.
FEATURES
• Automated YouTube research – discovers and analyses top-ranked videos so you don't have to
• AI-curated synthesis – combines multiple transcripts into one authoritative narrative
• Full creative control – adjust style, tone, objective, opinion, clarity, target word count and number of videos
• LinkedIn-optimised output – hook, 2-3 key points, CTA, strategic line breaks, 3-5 hashtags, no markdown
• One-click publish – returns a ready-to-post text block (≤1 300 characters)
HOW IT WORKS
1. Enter a topic and your preferred writing parameters.
2. The agent builds a YouTube search, fetches the page, and extracts the top N video URLs.
3. It pulls each transcript, then feeds them—plus your settings—into Claude 3.5 Sonnet.
4. The model writes a concise, engaging post designed for maximum LinkedIn engagement.
USE CASES
• Thought-leadership updates backed by fresh video research
• Rapid industry summaries after major events, webinars, or conferences
• Consistent LinkedIn content for busy founders, marketers, and creators
WHY YOU'LL LOVE IT
Save hours of manual research, avoid surface-level hot-takes, and publish posts that showcase real expertise—without the heavy lift.","[""writing""]",true,true
7d4120ad-b6b3-4419-8bdb-7dd7d350ef32,e7bb29a1-23c7-4fee-aa3b-5426174b8c52,youtube-to-linkedin-post-converter,YouTube to LinkedIn Post Converter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f084b326-a708-4396-be51-7ba59ad2ef32.png""]",false,Transform Your YouTube Videos into Engaging LinkedIn Posts with AI,"WHAT IT DOES:
This agent converts YouTube video content into a LinkedIn post by analyzing the video's transcript. It provides you with a tailored post that reflects the core ideas, key takeaways, and tone of the original video, optimizing it for engagement on LinkedIn.
HOW IT WORKS:
- You provide the URL to the YouTube video (required)
- You can choose the structure for the LinkedIn post (e.g., Personal Achievement Story, Lesson Learned, Thought Leadership, etc.)
- You can also select the tone (e.g., Inspirational, Analytical, Conversational, etc.)
- The transcript of the video is analyzed by the GPT-4 model and the Claude 3.5 Sonnet model
- The models extract key insights, memorable quotes, and the main points from the video
- You'll receive a LinkedIn post, formatted according to your chosen structure and tone, optimized for professional engagement
INPUTS:
- Source YouTube Video – Provide the URL to the YouTube video
- Structure – Choose the post format (e.g., Personal Achievement Story, Thought Leadership, etc.)
- Content – Specify the main message or idea of the post (e.g., Hot Take, Key Takeaways, etc.)
- Tone – Select the tone for the post (e.g., Conversational, Inspirational, etc.)
OUTPUT:
- LinkedIn Post – A well-crafted, AI-generated LinkedIn post with a professional tone, based on the video content and your specified preferences
Perfect for content creators, marketers, and professionals who want to repurpose YouTube videos for LinkedIn and boost their professional branding.","[""writing""]",false,true
c61d6a83-ea48-4df8-b447-3da2d9fe5814,00fdd42c-a14c-4d19-a567-65374ea0e87f,personalized-morning-coffee-newsletter,Personal Newsletter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f4b38e4c-8166-4caf-9411-96c9c4c82d4c.png""]",false,Start your day with personalized AI newsletters that deliver credibility and context for every interest or mood.,"This Personal Newsletter Agent provides a bespoke daily digest on your favorite topics and tone. Whether you prefer industry insights, lighthearted reads, or breaking news, this agent crafts your own unique newsletter to keep you informed and entertained.
How It Works
1. Enter your favorite topics, industries, or areas of interest.
2. Choose your tone—professional, casual, or humorous.
3. Set your preferred delivery cadence: daily or weekly.
4. The agent scans top sources and compiles 3–5 engaging stories, insights, and fun facts into a conversational newsletter.
Skip the morning scroll and enjoy a thoughtfully curated newsletter designed just for you. Stay ahead of trends, spark creative ideas, and enjoy an effortless, informed start to your day.
Use Cases
• Executives: Get a daily digest of market updates and leadership insights.
• Marketers: Receive curated creative trends and campaign inspiration.
• Entrepreneurs: Stay updated on your industry without information overload.","[""research""]",true,true
e2e49cfc-4a39-4d62-a6b3-c095f6d025ff,fc2c9976-0962-4625-a27b-d316573a9e7f,email-address-finder,Email Scout - Contact Finder Assistant,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/da8a690a-7a8b-4c1d-b6f8-e2f840c0205d.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a2ac25c-1609-4881-8140-e6da2421afb3.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/26179263-fe06-45bd-b6a0-0754660a0a46.jpg""]",false,Find contact details from name and location using AI search,"Finding someone's professional email address can be time-consuming and frustrating. Manual searching across multiple websites, social profiles, and business directories often leads to dead ends or outdated information.
Email Scout automates this process by intelligently searching across publicly available sources when you provide a person's name and location. Simply input basic information like ""Tim Cook, USA"" or ""Sarah Smith, London"" and let the AI assistant do the work of finding potential contact details.
Key Features:
- Quick search from just name and location
- Scans multiple public sources
- Automated AI-powered search process
- Easy to use with simple inputs
Perfect for recruiters, business development professionals, researchers, and anyone needing to establish professional contact.
Note: This tool searches only publicly available information. Search results depend on what contact information people have made public. Some searches may not yield results if the information isn't publicly accessible.","[""""]",false,true
81bcc372-0922-4a36-bc35-f7b1e51d6939,e437cc95-e671-489d-b915-76561fba8c7f,ai-youtube-to-blog-converter,YouTube Video to SEO Blog Writer,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/239e5a41-2515-4e1c-96ef-31d0d37ecbeb.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/c7d96966-786f-4be6-ad7d-3a51c84efc0e.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0275a74c-e2c2-4e29-a6e4-3a616c3c35dd.png""]",false,One link. One click. One powerful blog post.,"Effortlessly transform your YouTube videos into high-quality, SEO-optimized blog posts.
Your videos deserve a second life—in writing.
Make your content work twice as hard by repurposing it into engaging, searchable articles.
Perfect for content creators, marketers, and bloggers, this tool analyzes video content and generates well-structured blog posts tailored to your tone, audience, and word count. Just paste a YouTube URL and let the AI handle the rest.
FEATURES
• CONTENT ANALYSIS
Extracts key points from the video while preserving your message and intent.
• CUSTOMIZABLE OUTPUT
Select a tone that fits your audience: casual, professional, educational, or formal.
• SEO OPTIMIZATION
Automatically creates engaging titles and structured subheadings for better search visibility.
• USER-FRIENDLY
Repurpose your videos into written content to expand your reach and improve accessibility.
Whether you're looking to grow your blog, boost SEO, or simply get more out of your content, the AI YouTube-to-Blog Converter makes it effortless.
","[""writing""]",true,true
5c3510d2-fc8b-4053-8e19-67f53c86eb1a,f2cc74bb-f43f-4395-9c35-ecb30b5b4fc9,ai-webpage-copy-improver,AI Webpage Copy Improver,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d562d26f-5891-4b09-8859-fbb205972313.jpg""]",false,Boost Your Website's Search Engine Performance,"Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.","[""marketing""]",true,true
94d03bd3-7d44-4d47-b60c-edb2f89508d6,b6f6f0d3-49f4-4e3b-8155-ffe9141b32c0,domain-name-finder,Domain Name Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/28545e09-b2b8-4916-b4c6-67f982510a78.jpeg""]",false,Instantly generate brand-ready domain names that are actually available,"Overview:
Finding a domain name that fits your brand shouldn't take hours of searching and failed checks. The Domain Name Finder Agent turns your pitch into hundreds of creative, brand-ready domain ideas—filtered by live availability so every result is actionable.
How It Works
1. Input your product pitch, company name, or core keywords.
2. The agent analyzes brand tone, audience, and industry context.
3. It generates a list of unique, memorable domains that match your criteria.
4. All names are pre-filtered for real-time availability, so you can register immediately.
Business Value
Save hours of guesswork and eliminate dead ends. Accelerate brand launches, startup naming, and campaign creation with ready-to-claim domains.
Key Use Cases
• Startup Founders: Quickly find brand-ready domains for MVP launches or rebrands.
• Marketers: Test name options across campaigns with instant availability data.
• Entrepreneurs: Validate ideas faster with instant domain options.","[""business""]",false,true
7a831906-daab-426f-9d66-bcf98d869426,516d813b-d1bc-470f-add7-c63a4b2c2bad,ai-function,AI Function,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/620e8117-2ee1-4384-89e6-c2ef4ec3d9c9.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/476259e2-5a79-4a7b-8e70-deeebfca70d7.png""]",false,Never Code Again,"AI FUNCTION MAGIC
Your AI-powered assistant for turning plain-English descriptions into working Python functions.
HOW IT WORKS
1. Describe what the function should do.
2. Specify the inputs it needs.
3. Receive the generated Python code.
FEATURES
- Effortless Function Generation: convert natural-language specs into complete functions.
- Customizable Inputs: define the parameters that matter to you.
- Versatile Use Cases: simulate data, automate tasks, prototype ideas.
- Seamless Integration: add the generated function directly to your codebase.
EXAMPLE
Request: “Create a function that generates 20 examples of fake people, each with a name, date of birth, job title, and age.”
Input parameter: number_of_people (default 20)
Result: a list of dictionaries such as
[
{ ""name"": ""Emma Martinez"", ""date_of_birth"": ""19921103"", ""job_title"": ""Data Analyst"", ""age"": 32 },
{ ""name"": ""Liam OConnor"", ""date_of_birth"": ""19850719"", ""job_title"": ""Marketing Manager"", ""age"": 39 },
…18 more entries…
]","[""development""]",false,true
15 81bcc372-0922-4a36-bc35-f7b1e51d6939 e437cc95-e671-489d-b915-76561fba8c7f ai-youtube-to-blog-converter YouTube Video to SEO Blog Writer ["https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/239e5a41-2515-4e1c-96ef-31d0d37ecbeb.webp","https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/c7d96966-786f-4be6-ad7d-3a51c84efc0e.png","https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0275a74c-e2c2-4e29-a6e4-3a616c3c35dd.png"] false One link. One click. One powerful blog post. Effortlessly transform your YouTube videos into high-quality, SEO-optimized blog posts. Your videos deserve a second life—in writing. Make your content work twice as hard by repurposing it into engaging, searchable articles. Perfect for content creators, marketers, and bloggers, this tool analyzes video content and generates well-structured blog posts tailored to your tone, audience, and word count. Just paste a YouTube URL and let the AI handle the rest. FEATURES • CONTENT ANALYSIS Extracts key points from the video while preserving your message and intent. • CUSTOMIZABLE OUTPUT Select a tone that fits your audience: casual, professional, educational, or formal. • SEO OPTIMIZATION Automatically creates engaging titles and structured subheadings for better search visibility. • USER-FRIENDLY Repurpose your videos into written content to expand your reach and improve accessibility. Whether you're looking to grow your blog, boost SEO, or simply get more out of your content, the AI YouTube-to-Blog Converter makes it effortless. ["writing"] true true
16 5c3510d2-fc8b-4053-8e19-67f53c86eb1a f2cc74bb-f43f-4395-9c35-ecb30b5b4fc9 ai-webpage-copy-improver AI Webpage Copy Improver ["https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d562d26f-5891-4b09-8859-fbb205972313.jpg"] false Boost Your Website's Search Engine Performance Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver. ["marketing"] true true
17 94d03bd3-7d44-4d47-b60c-edb2f89508d6 b6f6f0d3-49f4-4e3b-8155-ffe9141b32c0 domain-name-finder Domain Name Finder ["https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/28545e09-b2b8-4916-b4c6-67f982510a78.jpeg"] false Instantly generate brand-ready domain names that are actually available Overview: Finding a domain name that fits your brand shouldn’t take hours of searching and failed checks. The Domain Name Finder Agent turns your pitch into hundreds of creative, brand-ready domain ideas—filtered by live availability so every result is actionable. How It Works 1. Input your product pitch, company name, or core keywords. 2. The agent analyzes brand tone, audience, and industry context. 3. It generates a list of unique, memorable domains that match your criteria. 4. All names are pre-filtered for real-time availability, so you can register immediately. Business Value Save hours of guesswork and eliminate dead ends. Accelerate brand launches, startup naming, and campaign creation with ready-to-claim domains. Key Use Cases • Startup Founders: Quickly find brand-ready domains for MVP launches or rebrands. • Marketers: Test name options across campaigns with instant availability data. • Entrepreneurs: Validate ideas faster with instant domain options. ["business"] false true
18 7a831906-daab-426f-9d66-bcf98d869426 516d813b-d1bc-470f-add7-c63a4b2c2bad ai-function AI Function ["https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/620e8117-2ee1-4384-89e6-c2ef4ec3d9c9.webp","https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/476259e2-5a79-4a7b-8e70-deeebfca70d7.png"] false Never Code Again AI FUNCTION MAGIC Your AI‑powered assistant for turning plain‑English descriptions into working Python functions. HOW IT WORKS 1. Describe what the function should do. 2. Specify the inputs it needs. 3. Receive the generated Python code. FEATURES - Effortless Function Generation: convert natural‑language specs into complete functions. - Customizable Inputs: define the parameters that matter to you. - Versatile Use Cases: simulate data, automate tasks, prototype ideas. - Seamless Integration: add the generated function directly to your codebase. EXAMPLE Request: “Create a function that generates 20 examples of fake people, each with a name, date of birth, job title, and age.” Input parameter: number_of_people (default 20) Result: a list of dictionaries such as [ { "name": "Emma Martinez", "date_of_birth": "1992‑11‑03", "job_title": "Data Analyst", "age": 32 }, { "name": "Liam O’Connor", "date_of_birth": "1985‑07‑19", "job_title": "Marketing Manager", "age": 39 }, …18 more entries… ] ["development"] false true

@@ -1,590 +0,0 @@
{
"id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"version": 29,
"is_active": false,
"name": "Unspirational Poster Maker",
"description": "This witty AI agent generates hilariously relatable \"motivational\" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clich\u00e9s and embrace our collective struggles to \"get it together.\" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Generated Image",
"description": "The resulting generated image ready for you to review and post."
},
"metadata": {
"position": {
"x": 2329.937006807125,
"y": 80.49068076698347
}
},
"input_links": [
{
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Theme",
"value": "Cooking"
},
"metadata": {
"position": {
"x": -1219.5966324967521,
"y": 80.50339731789956
}
},
"input_links": [],
"output_links": [
{
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"source_name": "result",
"sink_name": "prompt_values_#_THEME",
"is_static": true
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 1132.373897280427,
"y": 88.44610377514573
}
},
"input_links": [
{
"id": "54588c74-e090-4e49-89e4-844b9952a585",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 590.7543882245375,
"y": 85.69546832466654
}
},
"input_links": [
{
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 60.48904654237981,
"y": 86.06183359510214
}
},
"input_links": [
{
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"prompt": "A cat sprawled dramatically across an important-looking document during a work-from-home meeting, making direct eye contact with the camera while knocking over a coffee mug in slow motion. Text Overlay: \"Chaos is a career path. Be the obstacle everyone has to work around.\"",
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 1668.3572666956795,
"y": 89.69665262457966
}
},
"input_links": [
{
"id": "509b7587-1940-4a06-808d-edde9a74f400",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o",
"prompt": "<example_output>\nA photo of a sloth lounging on a desk, with its head resting on a keyboard. The keyboard is on top of a laptop with a blank spreadsheet open. A to-do list is placed beside the laptop, with the top item written as \"Do literally anything\". There is a text overlay that says \"If you can't outwork them, outnap them.\".\n</example_output>\n\nCreate a relatable satirical, snarky, user-deprecating motivational style image based on the theme: \"{{THEME}}\".\n\nOutput only the image description and caption, without any additional commentary or formatting.",
"prompt_values": {}
},
"metadata": {
"position": {
"x": -561.1139207164056,
"y": 78.60434452403524
}
},
"input_links": [
{
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"source_name": "result",
"sink_name": "prompt_values_#_THEME",
"is_static": true
}
],
"output_links": [
{
"id": "54588c74-e090-4e49-89e4-844b9952a585",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "509b7587-1940-4a06-808d-edde9a74f400",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"source_name": "result",
"sink_name": "prompt_values_#_THEME",
"is_static": true
},
{
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "54588c74-e090-4e49-89e4-844b9952a585",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "509b7587-1940-4a06-808d-edde9a74f400",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2024-12-20T19:58:34.390Z",
"input_schema": {
"type": "object",
"properties": {
"Theme": {
"advanced": false,
"secret": false,
"title": "Theme",
"default": "Cooking"
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Generated Image": {
"advanced": false,
"secret": false,
"title": "Generated Image",
"description": "The resulting generated image ready for you to review and post."
}
},
"required": [
"Generated Image"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"ideogram_api_key_credentials": {
"credentials_provider": [
"ideogram"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "ideogram",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.IDEOGRAM: 'ideogram'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"gpt-4o"
]
}
},
"required": [
"ideogram_api_key_credentials",
"openai_api_key_credentials"
],
"title": "UnspirationalPosterMakerCredentialsInputSchema",
"type": "object"
}
}
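For reference, the deleted "Unspirational Poster Maker" graph above reduces to a simple fan-out: a single "Theme" input feeds one GPT-4o node that writes an image prompt, and that prompt is wired in parallel to four Ideogram nodes whose results all land in the "Generated Image" output. A minimal Python sketch of that shape, with hypothetical `generate_text` and `generate_image` callables standing in for the blocks (these helpers are assumptions, not AutoGPT APIs):

```python
from typing import Callable

# Prompt template adapted from the graph's GPT-4o node (its {{THEME}} slot
# becomes a str.format placeholder here).
POSTER_PROMPT = (
    'Create a relatable satirical, snarky, user-deprecating motivational '
    'style image based on the theme: "{theme}".\n\n'
    "Output only the image description and caption, without any additional "
    "commentary or formatting."
)

def make_posters(
    theme: str,
    generate_text: Callable[[str], str],   # stands in for the AI Text Generator block
    generate_image: Callable[[str], str],  # stands in for one Ideogram block ("No Upscale")
    variants: int = 4,
) -> list[str]:
    # One LLM call writes the shared image prompt...
    image_prompt = generate_text(POSTER_PROMPT.format(theme=theme))
    # ...which fans out to four parallel image nodes, all wired into the
    # single "Generated Image" output.
    return [generate_image(image_prompt) for _ in range(variants)]
```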

@@ -1,447 +0,0 @@
{
"id": "622849a7-5848-4838-894d-01f8f07e3fad",
"version": 18,
"is_active": true,
"name": "AI Function",
"description": "## AI-Powered Function Magic: Never code again!\nProvide a description of a python function and your inputs and AI will provide the results.",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "return",
"title": null,
"value": null,
"format": "",
"secret": false,
"advanced": false,
"description": "The value returned by the function"
},
"metadata": {
"position": {
"x": 1598.8622921127233,
"y": 291.59140862204725
}
},
"input_links": [
{
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "o3-mini",
"retry": 3,
"prompt": "{{ARGS}}",
"sys_prompt": "You are now the following python function:\n\n```\n# {{DESCRIPTION}}\n{{FUNCTION}}\n```\n\nThe user will provide your input arguments.\nOnly respond with your `return` value.\nDo not include any commentary or additional text in your response. \nDo not include ``` backticks or any other decorators.",
"ollama_host": "localhost:11434",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 995,
"y": 290.50000000000006
}
},
"input_links": [
{
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_FUNCTION",
"is_static": true
},
{
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_ARGS",
"is_static": true
},
{
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_DESCRIPTION",
"is_static": true
}
],
"output_links": [
{
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
"input_default": {
"name": "Function Definition",
"title": null,
"value": "def fake_people(n: int) -> list[dict]:",
"secret": false,
"advanced": false,
"description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
"placeholder_values": []
},
"metadata": {
"position": {
"x": -672.6908629664215,
"y": 302.42044359789116
}
},
"input_links": [],
"output_links": [
{
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_FUNCTION",
"is_static": true
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "844530de-2354-46d8-b748-67306b7bbca1",
"block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
"input_default": {
"name": "Arguments",
"title": null,
"value": "20",
"secret": false,
"advanced": false,
"description": "The function's inputs\n\ne.g \"20\"",
"placeholder_values": []
},
"metadata": {
"position": {
"x": -158.1623599617334,
"y": 295.410856928333
}
},
"input_links": [],
"output_links": [
{
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_ARGS",
"is_static": true
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"block_id": "90a56ffb-7024-4b2b-ab50-e26c5e5ab8ba",
"input_default": {
"name": "Description",
"title": null,
"value": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.",
"secret": false,
"advanced": false,
"description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
"placeholder_values": []
},
"metadata": {
"position": {
"x": 374.4548658057796,
"y": 290.3779121974126
}
},
"input_links": [],
"output_links": [
{
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_DESCRIPTION",
"is_static": true
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_DESCRIPTION",
"is_static": true
},
{
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_ARGS",
"is_static": true
},
{
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_FUNCTION",
"is_static": true
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2025-04-19T17:10:48.857Z",
"input_schema": {
"type": "object",
"properties": {
"Function Definition": {
"advanced": false,
"anyOf": [
{
"format": "short-text",
"type": "string"
},
{
"type": "null"
}
],
"secret": false,
"title": "Function Definition",
"description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
"default": "def fake_people(n: int) -> list[dict]:"
},
"Arguments": {
"advanced": false,
"anyOf": [
{
"format": "short-text",
"type": "string"
},
{
"type": "null"
}
],
"secret": false,
"title": "Arguments",
"description": "The function's inputs\n\ne.g \"20\"",
"default": "20"
},
"Description": {
"advanced": false,
"anyOf": [
{
"format": "long-text",
"type": "string"
},
{
"type": "null"
}
],
"secret": false,
"title": "Description",
"description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
"default": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age."
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"return": {
"advanced": false,
"secret": false,
"title": "return",
"description": "The value returned by the function"
}
},
"required": [
"return"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"o3-mini"
]
}
},
"required": [
"openai_api_key_credentials"
],
"title": "AIFunctionCredentialsInputSchema",
"type": "object"
}
}
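The "AI Function" graph above encodes an "LLM as a Python function" pattern: a function signature, a description, and raw arguments are injected into a fixed system prompt, and the model's bare completion is surfaced as the graph's `return` output. A minimal sketch of the pattern, assuming a hypothetical `complete(system, user)` helper in place of the AI Text Generator block (which the graph runs with model "o3-mini" and `retry: 3`):

```python
from typing import Callable

FENCE = "`" * 3  # the prompt wraps the function in a fenced code block

# System prompt copied from the graph's o3-mini node, with the {{...}}
# slots rewritten as str.format placeholders.
SYS_PROMPT = (
    "You are now the following python function:\n\n"
    f"{FENCE}\n# {{description}}\n{{function}}\n{FENCE}\n\n"
    "The user will provide your input arguments.\n"
    "Only respond with your `return` value.\n"
    "Do not include any commentary or additional text in your response.\n"
    f"Do not include {FENCE} backticks or any other decorators."
)

def ai_function(
    function: str,     # e.g. 'def fake_people(n: int) -> list[dict]:'
    description: str,  # e.g. 'Generates n examples of fake data representing people...'
    args: str,         # e.g. '20'
    complete: Callable[[str, str], str],  # hypothetical (system, user) -> completion helper
) -> str:
    # The three input blocks fill the system prompt's slots; the arguments
    # become the user message; the raw completion is the "return" output.
    return complete(
        SYS_PROMPT.format(description=description, function=function),
        args,
    )
```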

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@@ -1,403 +0,0 @@
{
"id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"version": 12,
"is_active": true,
"name": "Flux AI Image Generator",
"description": "Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "7482c59d-725f-4686-82b9-0dfdc4e92316",
"block_id": "cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
"input_default": {
"text": "Press the \"Advanced\" toggle and input your replicate API key.\n\nYou can get one here:\nhttps://replicate.com/account/api-tokens\n"
},
"metadata": {
"position": {
"x": 872.8268131538296,
"y": 614.9436919065381
}
},
"input_links": [],
"output_links": [],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Generated Image"
},
"metadata": {
"position": {
"x": 1453.6844137728922,
"y": 963.2466395125115
}
},
"input_links": [
{
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Image Subject",
"value": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks.",
"description": "The subject of the image"
},
"metadata": {
"position": {
"x": -314.43009631839783,
"y": 962.935949165938
}
},
"input_links": [],
"output_links": [
{
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"source_name": "result",
"sink_name": "prompt_values_#_TOPIC",
"is_static": true
}
],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"block_id": "90f8c45e-e983-4644-aa0b-b4ebe2f531bc",
"input_default": {
"prompt": "dog",
"output_format": "png",
"replicate_model_name": "Flux Pro 1.1"
},
"metadata": {
"position": {
"x": 873.0119949791526,
"y": 966.1604399052493
}
},
"input_links": [
{
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o-mini",
"prompt": "Generate an incredibly detailed, photorealistic image prompt about {{TOPIC}}, describing the camera it's taken with and prompting the diffusion model to use all the best quality techniques.\n\nOutput only the prompt with no additional commentary.",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 277.3057034159709,
"y": 962.8382498113764
}
},
"input_links": [
{
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"source_name": "result",
"sink_name": "prompt_values_#_TOPIC",
"is_static": true
}
],
"output_links": [
{
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"source_name": "result",
"sink_name": "prompt_values_#_TOPIC",
"is_static": true
},
{
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2024-12-20T18:46:11.492Z",
"input_schema": {
"type": "object",
"properties": {
"Image Subject": {
"advanced": false,
"secret": false,
"title": "Image Subject",
"description": "The subject of the image",
"default": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks."
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Generated Image": {
"advanced": false,
"secret": false,
"title": "Generated Image"
}
},
"required": [
"Generated Image"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"replicate_api_key_credentials": {
"credentials_provider": [
"replicate"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "replicate",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.REPLICATE: 'replicate'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"gpt-4o-mini"
]
}
},
"required": [
"replicate_api_key_credentials",
"openai_api_key_credentials"
],
"title": "FluxAIImageGeneratorCredentialsInputSchema",
"type": "object"
}
}
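The "Flux AI Image Generator" graph above is a two-stage pipeline: a gpt-4o-mini node expands the short "Image Subject" into a detailed photorealistic prompt, and a Replicate node then renders that prompt with "Flux Pro 1.1" as PNG, feeding the "Generated Image" output. A minimal sketch, again with hypothetical callables standing in for the two blocks:

```python
from typing import Callable

# Prompt-enhancer template adapted from the graph's gpt-4o-mini node.
ENHANCER_PROMPT = (
    "Generate an incredibly detailed, photorealistic image prompt about "
    "{topic}, describing the camera it's taken with and prompting the "
    "diffusion model to use all the best quality techniques.\n\n"
    "Output only the prompt with no additional commentary."
)

def flux_generate(
    subject: str,
    generate_text: Callable[[str], str],     # stands in for the gpt-4o-mini enhancer stage
    generate_image: Callable[[str], bytes],  # stands in for "Flux Pro 1.1" on Replicate
) -> bytes:
    # Stage 1: expand the short subject into a rich diffusion prompt.
    # Stage 2: render it; the result feeds the "Generated Image" output.
    return generate_image(generate_text(ENHANCER_PROMPT.format(topic=subject)))
```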

@@ -1,505 +0,0 @@
{
"id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"version": 12,
"is_active": true,
"name": "AI Webpage Copy Improver",
"description": "Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Improved Webpage Copy"
},
"metadata": {
"position": {
"x": 1039.5884372540172,
"y": -0.8359099621230968
}
},
"input_links": [
{
"id": "d4334477-3616-454f-a430-614ca27f5b36",
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Original Page Analysis",
"description": "Analysis of the webpage as it currently stands."
},
"metadata": {
"position": {
"x": 1037.7724103954706,
"y": -606.5934325506903
}
},
"input_links": [
{
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Homepage URL",
"value": "https://agpt.co",
"description": "Enter the URL of the homepage you want to improve"
},
"metadata": {
"position": {
"x": -1195.1455674454749,
"y": 0
}
},
"input_links": [],
"output_links": [
{
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"source_name": "result",
"sink_name": "url",
"is_static": true
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"block_id": "436c3984-57fd-4b85-8e9a-459b356883bd",
"input_default": {
"raw_content": false
},
"metadata": {
"position": {
"x": -631.7330786555249,
"y": 1.9638396496230826
}
},
"input_links": [
{
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"source_name": "result",
"sink_name": "url",
"is_static": true
}
],
"output_links": [
{
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o",
"prompt": "Current Webpage Content:\n```\n{{CONTENT}}\n```\n\nBased on the following analysis of the webpage content:\n\n```\n{{ANALYSIS}}\n```\n\nRewrite and improve the content to address the identified issues. Focus on:\n1. Enhancing clarity and readability\n2. Optimizing for SEO (suggest and incorporate relevant keywords)\n3. Improving calls-to-action for better conversion rates\n4. Refining the structure and organization\n5. Maintaining brand consistency while improving the overall tone\n\nProvide the improved content in HTML format inside a code-block with \"```\" backticks, preserving the original structure where appropriate. Also, include a brief summary of the changes made and their potential impact.",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 488.37278423303917,
"y": 0
}
},
"input_links": [
{
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "response",
"sink_name": "prompt_values_#_ANALYSIS",
"is_static": false
}
],
"output_links": [
{
"id": "d4334477-3616-454f-a430-614ca27f5b36",
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "08612ce2-625b-4c17-accd-3acace7b6477",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o",
"prompt": "Analyze the following webpage content and provide a detailed report on its current state, including strengths and weaknesses in terms of clarity, SEO optimization, and potential for conversion:\n\n{{CONTENT}}\n\nInclude observations on:\n1. Overall readability and clarity\n2. Use of keywords and SEO-friendly language\n3. Effectiveness of calls-to-action\n4. Structure and organization of content\n5. Tone and brand consistency",
"prompt_values": {}
},
"metadata": {
"position": {
"x": -72.66206703605442,
"y": -0.58403945075381
}
},
"input_links": [
{
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
}
],
"output_links": [
{
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "response",
"sink_name": "prompt_values_#_ANALYSIS",
"is_static": false
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "d4334477-3616-454f-a430-614ca27f5b36",
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "response",
"sink_name": "prompt_values_#_ANALYSIS",
"is_static": false
},
{
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"source_name": "result",
"sink_name": "url",
"is_static": true
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2024-12-20T19:47:22.036Z",
"input_schema": {
"type": "object",
"properties": {
"Homepage URL": {
"advanced": false,
"secret": false,
"title": "Homepage URL",
"description": "Enter the URL of the homepage you want to improve",
"default": "https://agpt.co"
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Improved Webpage Copy": {
"advanced": false,
"secret": false,
"title": "Improved Webpage Copy"
},
"Original Page Analysis": {
"advanced": false,
"secret": false,
"title": "Original Page Analysis",
"description": "Analysis of the webpage as it currently stands."
}
},
"required": [
"Improved Webpage Copy",
"Original Page Analysis"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"jina_api_key_credentials": {
"credentials_provider": [
"jina"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "jina",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"gpt-4o"
]
}
},
"required": [
"jina_api_key_credentials",
"openai_api_key_credentials"
],
"title": "AIWebpageCopyImproverCredentialsInputSchema",
"type": "object"
}
}
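
A note on the wiring above: links whose sink_name carries the "prompt_values_#_" prefix write into the AI block's prompt_values dict, and the block substitutes those values for the matching {{PLACEHOLDER}} tokens in its prompt (here {{CONTENT}} and {{ANALYSIS}}). A minimal sketch of that substitution, assuming simple token replacement (illustrative only; the real logic lives in the platform's block implementation):

import re

def fill_prompt(prompt: str, prompt_values: dict[str, str]) -> str:
    """Replace {{KEY}} tokens with their linked values, leaving unknown tokens intact."""
    return re.sub(
        r"\{\{(\w+)\}\}",
        lambda m: str(prompt_values.get(m.group(1), m.group(0))),
        prompt,
    )

# e.g. fill_prompt("Analyze ...\n\n{{CONTENT}}", {"CONTENT": scraped_page_text})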

View File

@@ -1,615 +0,0 @@
{
"id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"version": 29,
"is_active": true,
"name": "Email Address Finder",
"description": "Input information of a business and find their email address",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Address",
"value": "USA"
},
"metadata": {
"position": {
"x": 1047.9357219838776,
"y": 1067.9123910370954
}
},
"input_links": [],
"output_links": [
{
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_ADDRESS",
"is_static": true
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"block_id": "3146e4fe-2cdd-4f29-bd12-0c9d5bb4deb0",
"input_default": {
"group": 1,
"pattern": "<email>(.*?)<\\/email>"
},
"metadata": {
"position": {
"x": 3381.2821481740634,
"y": 246.091098184158
}
},
"input_links": [
{
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"source_name": "response",
"sink_name": "text",
"is_static": false
}
],
"output_links": [
{
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"source_name": "negative",
"sink_name": "values_#_Result",
"is_static": false
},
{
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "positive",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Email"
},
"metadata": {
"position": {
"x": 4525.4246310882,
"y": 246.36913665010354
}
},
"input_links": [
{
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "output",
"sink_name": "value",
"is_static": false
},
{
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "positive",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"block_id": "87840993-2053-44b7-8da4-187ad4ee518c",
"input_default": {},
"metadata": {
"position": {
"x": 2182.7499999999995,
"y": 242.00001144409185
}
},
"input_links": [
{
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"source_name": "output",
"sink_name": "query",
"is_static": false
}
],
"output_links": [
{
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "results",
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Business Name",
"value": "Tim Cook"
},
"metadata": {
"position": {
"x": 1049.9704155272595,
"y": 244.49931152418344
}
},
"input_links": [],
"output_links": [
{
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_NAME",
"is_static": true
},
{
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "result",
"sink_name": "prompt_values_#_BUSINESS_NAME",
"is_static": true
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
"input_default": {
"format": "Email Address of {{NAME}}, {{ADDRESS}}",
"values": {}
},
"metadata": {
"position": {
"x": 1625.25,
"y": 243.25001144409185
}
},
"input_links": [
{
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_NAME",
"is_static": true
},
{
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_ADDRESS",
"is_static": true
}
],
"output_links": [
{
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"source_name": "output",
"sink_name": "query",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
"input_default": {
"format": "Failed to find email. \nResult:\n{{RESULT}}",
"values": {}
},
"metadata": {
"position": {
"x": 3949.7493830805934,
"y": 705.209819698647
}
},
"input_links": [
{
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"source_name": "negative",
"sink_name": "values_#_Result",
"is_static": false
}
],
"output_links": [
{
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "output",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "claude-sonnet-4-5-20250929",
"prompt": "<business_website>\n{{WEBSITE_CONTENT}}\n</business_website>\n\nExtract the Contact Email of {{BUSINESS_NAME}}.\n\nIf no email that can be used to contact {{BUSINESS_NAME}} is present, output `N/A`.\nDo not share any emails other than the email for this specific entity.\n\nIf multiple present pick the likely best one.\n\nRespond with the email (or N/A) inside <email></email> tags.\n\nExample Response:\n\n<thoughts_or_comments>\nThere were many emails present, but luckily one was for {{BUSINESS_NAME}} which I have included below.\n</thoughts_or_comments>\n<email>\nexample@email.com\n</email>",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 2774.879259081777,
"y": 243.3102035752969
}
},
"input_links": [
{
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "result",
"sink_name": "prompt_values_#_BUSINESS_NAME",
"is_static": true
},
{
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "results",
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
"is_static": false
}
],
"output_links": [
{
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"source_name": "response",
"sink_name": "text",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"source_name": "response",
"sink_name": "text",
"is_static": false
},
{
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"source_name": "negative",
"sink_name": "values_#_Result",
"is_static": false
},
{
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "output",
"sink_name": "value",
"is_static": false
},
{
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_NAME",
"is_static": true
},
{
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "positive",
"sink_name": "value",
"is_static": false
},
{
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "result",
"sink_name": "prompt_values_#_BUSINESS_NAME",
"is_static": true
},
{
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"source_name": "output",
"sink_name": "query",
"is_static": false
},
{
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_ADDRESS",
"is_static": true
},
{
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "results",
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
"is_static": false
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2025-01-03T00:46:30.244Z",
"input_schema": {
"type": "object",
"properties": {
"Address": {
"advanced": false,
"secret": false,
"title": "Address",
"default": "USA"
},
"Business Name": {
"advanced": false,
"secret": false,
"title": "Business Name",
"default": "Tim Cook"
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Email": {
"advanced": false,
"secret": false,
"title": "Email"
}
},
"required": [
"Email"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"jina_api_key_credentials": {
"credentials_provider": [
"jina"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "jina",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"anthropic_api_key_credentials": {
"credentials_provider": [
"anthropic"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "anthropic",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.ANTHROPIC: 'anthropic'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"claude-sonnet-4-5-20250929"
]
}
},
"required": [
"jina_api_key_credentials",
"anthropic_api_key_credentials"
],
"title": "EmailAddressFinderCredentialsInputSchema",
"type": "object"
}
}
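
The match step above hinges on the LLM wrapping its answer in <email></email> tags: the regex block (pattern "<email>(.*?)<\/email>", group 1) routes a hit to its "positive" output and the raw text to "negative", which feeds the failure formatter. A rough Python equivalent of that branch (a sketch, not the platform's block code):

import re

def extract_email(response: str) -> tuple[str | None, str | None]:
    """Return (positive, negative): the captured email on a match, else the raw text."""
    match = re.search(r"<email>(.*?)</email>", response, re.DOTALL)
    if match:
        return match.group(1).strip(), None   # -> "Email" output
    return None, response                     # -> "Failed to find email" formatter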

View File

@@ -1,107 +0,0 @@
from fastapi import HTTPException, Security, status
from fastapi.security import APIKeyHeader, HTTPAuthorizationCredentials, HTTPBearer
from prisma.enums import APIKeyPermission
from backend.data.auth.api_key import APIKeyInfo, validate_api_key
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.auth.oauth import (
InvalidClientError,
InvalidTokenError,
OAuthAccessTokenInfo,
validate_access_token,
)
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
bearer_auth = HTTPBearer(auto_error=False)
async def require_api_key(api_key: str | None = Security(api_key_header)) -> APIKeyInfo:
"""Middleware for API key authentication only"""
if api_key is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Missing API key"
)
api_key_obj = await validate_api_key(api_key)
if not api_key_obj:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key"
)
return api_key_obj
async def require_access_token(
bearer: HTTPAuthorizationCredentials | None = Security(bearer_auth),
) -> OAuthAccessTokenInfo:
"""Middleware for OAuth access token authentication only"""
if bearer is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Missing Authorization header",
)
try:
token_info, _ = await validate_access_token(bearer.credentials)
except (InvalidClientError, InvalidTokenError) as e:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e))
return token_info
async def require_auth(
api_key: str | None = Security(api_key_header),
bearer: HTTPAuthorizationCredentials | None = Security(bearer_auth),
) -> APIAuthorizationInfo:
"""
Unified authentication middleware supporting both API keys and OAuth tokens.
Supports two authentication methods, which are checked in order:
1. X-API-Key header (existing API key authentication)
2. Authorization: Bearer <token> header (OAuth access token)
Returns:
APIAuthorizationInfo: base class of both APIKeyInfo and OAuthAccessTokenInfo.
"""
# Try API key first
if api_key is not None:
api_key_info = await validate_api_key(api_key)
if api_key_info:
return api_key_info
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key"
)
# Try OAuth bearer token
if bearer is not None:
try:
token_info, _ = await validate_access_token(bearer.credentials)
return token_info
except (InvalidClientError, InvalidTokenError) as e:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e))
# No credentials provided
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Missing authentication. Provide API key or access token.",
)
def require_permission(permission: APIKeyPermission):
"""
Dependency function for checking specific permissions
(works with API keys and OAuth tokens)
"""
async def check_permission(
auth: APIAuthorizationInfo = Security(require_auth),
) -> APIAuthorizationInfo:
if permission not in auth.scopes:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail=f"Missing required permission: {permission.value}",
)
return auth
return check_permission
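
For reference, a route guarded by require_permission accepts either an X-API-Key header or an OAuth Bearer token, since require_auth tries both in that order. A minimal usage sketch (hypothetical /whoami endpoint, mirroring how the external API routes below consume this dependency):

from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission

from backend.api.external.middleware import require_permission
from backend.data.auth.base import APIAuthorizationInfo

router = APIRouter()

@router.get("/whoami")
async def whoami(
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.IDENTITY)
    ),
) -> dict[str, str]:
    # auth is an APIKeyInfo or OAuthAccessTokenInfo; both expose user_id and scopes
    return {"user_id": auth.user_id}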

View File

@@ -1,655 +0,0 @@
"""
External API endpoints for integrations and credentials.
This module provides endpoints for external applications (like Autopilot) to:
- Initiate OAuth flows with custom callback URLs
- Complete OAuth flows by exchanging authorization codes
- Create API key, user/password, and host-scoped credentials
- List and manage user credentials
"""
import logging
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Union
from urllib.parse import urlparse
from fastapi import APIRouter, Body, HTTPException, Path, Security, status
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field, SecretStr
from backend.api.external.middleware import require_permission
from backend.api.features.integrations.models import get_all_provider_names
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.model import (
APIKeyCredentials,
Credentials,
CredentialsType,
HostScopedCredentials,
OAuth2Credentials,
UserPasswordCredentials,
)
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
from backend.integrations.providers import ProviderName
from backend.util.settings import Settings
if TYPE_CHECKING:
from backend.integrations.oauth import BaseOAuthHandler
logger = logging.getLogger(__name__)
settings = Settings()
creds_manager = IntegrationCredentialsManager()
integrations_router = APIRouter(prefix="/integrations", tags=["integrations"])
# ==================== Request/Response Models ==================== #
class OAuthInitiateRequest(BaseModel):
"""Request model for initiating an OAuth flow."""
callback_url: str = Field(
..., description="The external app's callback URL for OAuth redirect"
)
scopes: list[str] = Field(
default_factory=list, description="OAuth scopes to request"
)
state_metadata: dict[str, Any] = Field(
default_factory=dict,
description="Arbitrary metadata to echo back on completion",
)
class OAuthInitiateResponse(BaseModel):
"""Response model for OAuth initiation."""
login_url: str = Field(..., description="URL to redirect user for OAuth consent")
state_token: str = Field(..., description="State token for CSRF protection")
expires_at: int = Field(
..., description="Unix timestamp when the state token expires"
)
class OAuthCompleteRequest(BaseModel):
"""Request model for completing an OAuth flow."""
code: str = Field(..., description="Authorization code from OAuth provider")
state_token: str = Field(..., description="State token from initiate request")
class OAuthCompleteResponse(BaseModel):
"""Response model for OAuth completion."""
credentials_id: str = Field(..., description="ID of the stored credentials")
provider: str = Field(..., description="Provider name")
type: str = Field(..., description="Credential type (oauth2)")
title: Optional[str] = Field(None, description="Credential title")
scopes: list[str] = Field(default_factory=list, description="Granted scopes")
username: Optional[str] = Field(None, description="Username from provider")
state_metadata: dict[str, Any] = Field(
default_factory=dict, description="Echoed metadata from initiate request"
)
class CredentialSummary(BaseModel):
"""Summary of a credential without sensitive data."""
id: str
provider: str
type: CredentialsType
title: Optional[str] = None
scopes: Optional[list[str]] = None
username: Optional[str] = None
host: Optional[str] = None
class ProviderInfo(BaseModel):
"""Information about an integration provider."""
name: str
supports_oauth: bool = False
supports_api_key: bool = False
supports_user_password: bool = False
supports_host_scoped: bool = False
default_scopes: list[str] = Field(default_factory=list)
# ==================== Credential Creation Models ==================== #
class CreateAPIKeyCredentialRequest(BaseModel):
"""Request model for creating API key credentials."""
type: Literal["api_key"] = "api_key"
api_key: str = Field(..., description="The API key")
title: str = Field(..., description="A name for this credential")
expires_at: Optional[int] = Field(
None, description="Unix timestamp when the API key expires"
)
class CreateUserPasswordCredentialRequest(BaseModel):
"""Request model for creating username/password credentials."""
type: Literal["user_password"] = "user_password"
username: str = Field(..., description="Username")
password: str = Field(..., description="Password")
title: str = Field(..., description="A name for this credential")
class CreateHostScopedCredentialRequest(BaseModel):
"""Request model for creating host-scoped credentials."""
type: Literal["host_scoped"] = "host_scoped"
host: str = Field(..., description="Host/domain pattern to match")
headers: dict[str, str] = Field(..., description="Headers to include in requests")
title: str = Field(..., description="A name for this credential")
# Union type for credential creation
CreateCredentialRequest = Annotated[
CreateAPIKeyCredentialRequest
| CreateUserPasswordCredentialRequest
| CreateHostScopedCredentialRequest,
Field(discriminator="type"),
]
class CreateCredentialResponse(BaseModel):
"""Response model for credential creation."""
id: str
provider: str
type: CredentialsType
title: Optional[str] = None
# ==================== Helper Functions ==================== #
def validate_callback_url(callback_url: str) -> bool:
"""Validate that the callback URL is from an allowed origin."""
allowed_origins = settings.config.external_oauth_callback_origins
try:
parsed = urlparse(callback_url)
callback_origin = f"{parsed.scheme}://{parsed.netloc}"
for allowed in allowed_origins:
# Simple origin matching
if callback_origin == allowed:
return True
# Allow localhost with any port in development (proper hostname check)
if parsed.hostname == "localhost":
for allowed in allowed_origins:
allowed_parsed = urlparse(allowed)
if allowed_parsed.hostname == "localhost":
return True
return False
except Exception:
return False
def _get_oauth_handler_for_external(
provider_name: str, redirect_uri: str
) -> "BaseOAuthHandler":
"""Get an OAuth handler configured with an external redirect URI."""
# Ensure blocks are loaded so SDK providers are available
try:
from backend.blocks import load_all_blocks
load_all_blocks()
except Exception as e:
logger.warning(f"Failed to load blocks: {e}")
if provider_name not in HANDLERS_BY_NAME:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Provider '{provider_name}' does not support OAuth",
)
# Check if this provider has custom OAuth credentials
oauth_credentials = CREDENTIALS_BY_PROVIDER.get(provider_name)
if oauth_credentials and not oauth_credentials.use_secrets:
import os
client_id = (
os.getenv(oauth_credentials.client_id_env_var)
if oauth_credentials.client_id_env_var
else None
)
client_secret = (
os.getenv(oauth_credentials.client_secret_env_var)
if oauth_credentials.client_secret_env_var
else None
)
else:
client_id = getattr(settings.secrets, f"{provider_name}_client_id", None)
client_secret = getattr(
settings.secrets, f"{provider_name}_client_secret", None
)
if not (client_id and client_secret):
logger.error(f"Attempt to use unconfigured {provider_name} OAuth integration")
raise HTTPException(
status_code=status.HTTP_501_NOT_IMPLEMENTED,
detail={
"message": f"Integration with provider '{provider_name}' is not configured.",
"hint": "Set client ID and secret in the application's deployment environment",
},
)
handler_class = HANDLERS_BY_NAME[provider_name]
return handler_class(
client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri,
)
# ==================== Endpoints ==================== #
@integrations_router.get("/providers", response_model=list[ProviderInfo])
async def list_providers(
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS)
),
) -> list[ProviderInfo]:
"""
List all available integration providers.
Returns a list of all providers with their supported credential types.
Most providers support API key credentials, and some also support OAuth.
"""
# Ensure blocks are loaded
try:
from backend.blocks import load_all_blocks
load_all_blocks()
except Exception as e:
logger.warning(f"Failed to load blocks: {e}")
from backend.sdk.registry import AutoRegistry
providers = []
for name in get_all_provider_names():
supports_oauth = name in HANDLERS_BY_NAME
handler_class = HANDLERS_BY_NAME.get(name)
default_scopes = (
getattr(handler_class, "DEFAULT_SCOPES", []) if handler_class else []
)
# Check if provider has specific auth types from SDK registration
sdk_provider = AutoRegistry.get_provider(name)
if sdk_provider and sdk_provider.supported_auth_types:
supports_api_key = "api_key" in sdk_provider.supported_auth_types
supports_user_password = (
"user_password" in sdk_provider.supported_auth_types
)
supports_host_scoped = "host_scoped" in sdk_provider.supported_auth_types
else:
# Fallback for legacy providers
supports_api_key = True # All providers can accept API keys
supports_user_password = name in ("smtp",)
supports_host_scoped = name == "http"
providers.append(
ProviderInfo(
name=name,
supports_oauth=supports_oauth,
supports_api_key=supports_api_key,
supports_user_password=supports_user_password,
supports_host_scoped=supports_host_scoped,
default_scopes=default_scopes,
)
)
return providers
@integrations_router.post(
"/{provider}/oauth/initiate",
response_model=OAuthInitiateResponse,
summary="Initiate OAuth flow",
)
async def initiate_oauth(
provider: Annotated[str, Path(title="The OAuth provider")],
request: OAuthInitiateRequest,
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
),
) -> OAuthInitiateResponse:
"""
Initiate an OAuth flow for an external application.
This endpoint allows external apps to start an OAuth flow with a custom
callback URL. The callback URL must be from an allowed origin configured
in the platform settings.
Returns a login URL to redirect the user to, along with a state token
for CSRF protection.
"""
# Validate callback URL
if not validate_callback_url(request.callback_url):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=(
f"Callback URL origin is not allowed. "
f"Allowed origins: {settings.config.external_oauth_callback_origins}",
),
)
# Validate provider
try:
provider_name = ProviderName(provider)
except ValueError:
# Check if it's a dynamically registered provider
if provider not in HANDLERS_BY_NAME:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Provider '{provider}' not found",
)
provider_name = provider
# Get OAuth handler with external callback URL
handler = _get_oauth_handler_for_external(
provider if isinstance(provider_name, str) else provider_name.value,
request.callback_url,
)
# Store state token with external flow metadata
# Note: initiated_by_api_key_id is only available for API key auth, not OAuth
api_key_id = getattr(auth, "id", None) if auth.type == "api_key" else None
state_token, code_challenge = await creds_manager.store.store_state_token(
user_id=auth.user_id,
provider=provider if isinstance(provider_name, str) else provider_name.value,
scopes=request.scopes,
callback_url=request.callback_url,
state_metadata=request.state_metadata,
initiated_by_api_key_id=api_key_id,
)
# Build login URL
login_url = handler.get_login_url(
request.scopes, state_token, code_challenge=code_challenge
)
# Calculate expiration (10 minutes from now)
from datetime import datetime, timedelta, timezone
expires_at = int((datetime.now(timezone.utc) + timedelta(minutes=10)).timestamp())
return OAuthInitiateResponse(
login_url=login_url,
state_token=state_token,
expires_at=expires_at,
)
@integrations_router.post(
"/{provider}/oauth/complete",
response_model=OAuthCompleteResponse,
summary="Complete OAuth flow",
)
async def complete_oauth(
provider: Annotated[str, Path(title="The OAuth provider")],
request: OAuthCompleteRequest,
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
),
) -> OAuthCompleteResponse:
"""
Complete an OAuth flow by exchanging the authorization code for tokens.
This endpoint should be called after the user has authorized the application
and been redirected back to the external app's callback URL with an
authorization code.
"""
# Verify state token
valid_state = await creds_manager.store.verify_state_token(
auth.user_id, request.state_token, provider
)
if not valid_state:
logger.warning(f"Invalid or expired state token for provider {provider}")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid or expired state token",
)
# Verify this is an external flow (callback_url must be set)
if not valid_state.callback_url:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="State token was not created for external OAuth flow",
)
# Get OAuth handler with the original callback URL
handler = _get_oauth_handler_for_external(provider, valid_state.callback_url)
try:
scopes = valid_state.scopes
scopes = handler.handle_default_scopes(scopes)
credentials = await handler.exchange_code_for_tokens(
request.code, scopes, valid_state.code_verifier
)
# Handle Linear's space-separated scopes
if len(credentials.scopes) == 1 and " " in credentials.scopes[0]:
credentials.scopes = credentials.scopes[0].split(" ")
# Check scope mismatch
if not set(scopes).issubset(set(credentials.scopes)):
logger.warning(
f"Granted scopes {credentials.scopes} for provider {provider} "
f"do not include all requested scopes {scopes}"
)
except Exception as e:
logger.error(f"OAuth2 Code->Token exchange failed for provider {provider}: {e}")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"OAuth2 callback failed to exchange code for tokens: {str(e)}",
)
# Store credentials
await creds_manager.create(auth.user_id, credentials)
logger.info(f"Successfully completed external OAuth for provider {provider}")
return OAuthCompleteResponse(
credentials_id=credentials.id,
provider=credentials.provider,
type=credentials.type,
title=credentials.title,
scopes=credentials.scopes,
username=credentials.username,
state_metadata=valid_state.state_metadata,
)
@integrations_router.get("/credentials", response_model=list[CredentialSummary])
async def list_credentials(
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS)
),
) -> list[CredentialSummary]:
"""
List all credentials for the authenticated user.
Returns metadata about each credential without exposing sensitive tokens.
"""
credentials = await creds_manager.store.get_all_creds(auth.user_id)
return [
CredentialSummary(
id=cred.id,
provider=cred.provider,
type=cred.type,
title=cred.title,
scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
username=cred.username if isinstance(cred, OAuth2Credentials) else None,
host=cred.host if isinstance(cred, HostScopedCredentials) else None,
)
for cred in credentials
]
@integrations_router.get(
"/{provider}/credentials", response_model=list[CredentialSummary]
)
async def list_credentials_by_provider(
provider: Annotated[str, Path(title="The provider to list credentials for")],
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS)
),
) -> list[CredentialSummary]:
"""
List credentials for a specific provider.
"""
credentials = await creds_manager.store.get_creds_by_provider(
auth.user_id, provider
)
return [
CredentialSummary(
id=cred.id,
provider=cred.provider,
type=cred.type,
title=cred.title,
scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
username=cred.username if isinstance(cred, OAuth2Credentials) else None,
host=cred.host if isinstance(cred, HostScopedCredentials) else None,
)
for cred in credentials
]
@integrations_router.post(
"/{provider}/credentials",
response_model=CreateCredentialResponse,
status_code=status.HTTP_201_CREATED,
summary="Create credentials",
)
async def create_credential(
provider: Annotated[str, Path(title="The provider to create credentials for")],
request: Union[
CreateAPIKeyCredentialRequest,
CreateUserPasswordCredentialRequest,
CreateHostScopedCredentialRequest,
] = Body(..., discriminator="type"),
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
),
) -> CreateCredentialResponse:
"""
Create non-OAuth credentials for a provider.
Supports creating:
- API key credentials (type: "api_key")
- Username/password credentials (type: "user_password")
- Host-scoped credentials (type: "host_scoped")
For OAuth credentials, use the OAuth initiate/complete flow instead.
"""
# Validate provider exists
all_providers = get_all_provider_names()
if provider not in all_providers:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Provider '{provider}' not found",
)
# Create the appropriate credential type
credentials: Credentials
if request.type == "api_key":
credentials = APIKeyCredentials(
provider=provider,
api_key=SecretStr(request.api_key),
title=request.title,
expires_at=request.expires_at,
)
elif request.type == "user_password":
credentials = UserPasswordCredentials(
provider=provider,
username=SecretStr(request.username),
password=SecretStr(request.password),
title=request.title,
)
elif request.type == "host_scoped":
# Convert string headers to SecretStr
secret_headers = {k: SecretStr(v) for k, v in request.headers.items()}
credentials = HostScopedCredentials(
provider=provider,
host=request.host,
headers=secret_headers,
title=request.title,
)
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Unsupported credential type: {request.type}",
)
# Store credentials
try:
await creds_manager.create(auth.user_id, credentials)
except Exception as e:
logger.error(f"Failed to store credentials: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Failed to store credentials: {str(e)}",
)
logger.info(f"Created {request.type} credentials for provider {provider}")
return CreateCredentialResponse(
id=credentials.id,
provider=provider,
type=credentials.type,
title=credentials.title,
)
class DeleteCredentialResponse(BaseModel):
"""Response model for deleting a credential."""
deleted: bool = Field(..., description="Whether the credential was deleted")
credentials_id: str = Field(..., description="ID of the deleted credential")
@integrations_router.delete(
"/{provider}/credentials/{cred_id}",
response_model=DeleteCredentialResponse,
)
async def delete_credential(
provider: Annotated[str, Path(title="The provider")],
cred_id: Annotated[str, Path(title="The credential ID to delete")],
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.DELETE_INTEGRATIONS)
),
) -> DeleteCredentialResponse:
"""
Delete a credential.
Note: This does not revoke the tokens with the provider. For full cleanup,
use the main API's delete endpoint which handles webhook cleanup and
token revocation.
"""
creds = await creds_manager.store.get_creds_by_id(auth.user_id, cred_id)
if not creds:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail="Credentials not found"
)
if creds.provider != provider:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Credentials do not match the specified provider",
)
await creds_manager.delete(auth.user_id, cred_id)
return DeleteCredentialResponse(deleted=True, credentials_id=cred_id)
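
End to end, an external application drives the OAuth flow above with two API calls: initiate to obtain a login_url and state token, redirect the user there, then complete with the code the provider sends to the callback. A client-side sketch using httpx (base URL, provider, and API key are assumptions; endpoint paths are as defined above):

import httpx

BASE = "https://api.example.com/api/integrations"  # assumed mount point
HEADERS = {"X-API-Key": "your-api-key"}            # key needs MANAGE_INTEGRATIONS

with httpx.Client(headers=HEADERS, timeout=30) as client:
    # Step 1: obtain a consent URL and a CSRF state token
    init = client.post(
        f"{BASE}/github/oauth/initiate",  # "github" is a hypothetical provider here
        json={
            "callback_url": "https://app.example.com/oauth/callback",
            "scopes": ["repo"],
        },
    ).json()
    print("Redirect the user to:", init["login_url"])

    # Step 2: after the provider redirects back with ?code=...
    done = client.post(
        f"{BASE}/github/oauth/complete",
        json={"code": "<code-from-callback>", "state_token": init["state_token"]},
    ).json()
    print("Stored credentials:", done["credentials_id"])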

View File

@@ -1,328 +0,0 @@
import logging
import urllib.parse
from collections import defaultdict
from typing import Annotated, Any, Literal, Optional, Sequence
from fastapi import APIRouter, Body, HTTPException, Security
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
import backend.api.features.store.cache as store_cache
import backend.api.features.store.model as store_model
import backend.data.block
from backend.api.external.middleware import require_permission
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data import user as user_db
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.executor.utils import add_graph_execution
from backend.util.settings import Settings
from .integrations import integrations_router
from .tools import tools_router
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
v1_router.include_router(integrations_router)
v1_router.include_router(tools_router)
class UserInfoResponse(BaseModel):
id: str
name: Optional[str]
email: str
timezone: str = Field(
description="The user's last known timezone (e.g. 'Europe/Amsterdam'), "
"or 'not-set' if not set"
)
@v1_router.get(
path="/me",
tags=["user", "meta"],
)
async def get_user_info(
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.IDENTITY)
),
) -> UserInfoResponse:
user = await user_db.get_user_by_id(auth.user_id)
return UserInfoResponse(
id=user.id,
name=user.name,
email=user.email,
timezone=user.timezone,
)
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
)
async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
    path="/blocks/{block_id}/execute",
    tags=["blocks"],
)
async def execute_graph_block(
block_id: str,
data: BlockInput,
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.EXECUTE_BLOCK)
),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
async for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.EXECUTE_GRAPH)
),
) -> dict[str, Any]:
try:
graph_exec = await add_graph_execution(
graph_id=graph_id,
user_id=auth.user_id,
inputs=node_input,
graph_version=graph_version,
)
return {"id": graph_exec.id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: dict[str, Any]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: list[ExecutionNode]
output: Optional[list[dict[str, str]]]
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.READ_GRAPH)
),
) -> GraphExecutionResult:
graph_exec = await execution_db.get_graph_execution(
user_id=auth.user_id,
execution_id=graph_exec_id,
include_node_executions=True,
)
if not graph_exec:
raise HTTPException(
status_code=404, detail=f"Graph execution #{graph_exec_id} not found."
)
if not await graph_db.get_graph(
graph_id=graph_exec.graph_id,
version=graph_exec.graph_version,
user_id=auth.user_id,
):
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
return GraphExecutionResult(
execution_id=graph_exec_id,
status=graph_exec.status.value,
nodes=[
ExecutionNode(
node_id=node_exec.node_id,
input=node_exec.input_data.get("value", node_exec.input_data),
output={k: v for k, v in node_exec.output_data.items()},
)
for node_exec in graph_exec.node_executions
],
output=(
[
{name: value}
for name, values in graph_exec.outputs.items()
for value in values
]
if graph_exec.status == AgentExecutionStatus.COMPLETED
else None
),
)
##############################################
############### Store Endpoints ##############
##############################################
@v1_router.get(
path="/store/agents",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.StoreAgentsResponse,
)
async def get_store_agents(
featured: bool = False,
creator: str | None = None,
sorted_by: Literal["rating", "runs", "name", "updated_at"] | None = None,
search_query: str | None = None,
category: str | None = None,
page: int = 1,
page_size: int = 20,
) -> store_model.StoreAgentsResponse:
"""
Get a paginated list of agents from the store with optional filtering and sorting.
Args:
featured: Filter to only show featured agents
creator: Filter agents by creator username
sorted_by: Sort agents by "runs", "rating", "name", or "updated_at"
search_query: Search agents by name, subheading and description
category: Filter agents by category
page: Page number for pagination (default 1)
page_size: Number of agents per page (default 20)
Returns:
StoreAgentsResponse: Paginated list of agents matching the filters
"""
if page < 1:
raise HTTPException(status_code=422, detail="Page must be greater than 0")
if page_size < 1:
raise HTTPException(status_code=422, detail="Page size must be greater than 0")
agents = await store_cache._get_cached_store_agents(
featured=featured,
creator=creator,
sorted_by=sorted_by,
search_query=search_query,
category=category,
page=page,
page_size=page_size,
)
return agents
@v1_router.get(
path="/store/agents/{username}/{agent_name}",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.StoreAgentDetails,
)
async def get_store_agent(
username: str,
agent_name: str,
) -> store_model.StoreAgentDetails:
"""
Get details of a specific store agent by username and agent name.
Args:
username: Creator's username
agent_name: Name/slug of the agent
Returns:
StoreAgentDetails: Detailed information about the agent
"""
username = urllib.parse.unquote(username).lower()
agent_name = urllib.parse.unquote(agent_name).lower()
agent = await store_cache._get_cached_agent_details(
username=username, agent_name=agent_name
)
return agent
@v1_router.get(
path="/store/creators",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.CreatorsResponse,
)
async def get_store_creators(
featured: bool = False,
search_query: str | None = None,
sorted_by: Literal["agent_rating", "agent_runs", "num_agents"] | None = None,
page: int = 1,
page_size: int = 20,
) -> store_model.CreatorsResponse:
"""
Get a paginated list of store creators with optional filtering and sorting.
Args:
featured: Filter to only show featured creators
search_query: Search creators by profile description
sorted_by: Sort by "agent_rating", "agent_runs", or "num_agents"
page: Page number for pagination (default 1)
page_size: Number of creators per page (default 20)
Returns:
CreatorsResponse: Paginated list of creators matching the filters
"""
if page < 1:
raise HTTPException(status_code=422, detail="Page must be greater than 0")
if page_size < 1:
raise HTTPException(status_code=422, detail="Page size must be greater than 0")
creators = await store_cache._get_cached_store_creators(
featured=featured,
search_query=search_query,
sorted_by=sorted_by,
page=page,
page_size=page_size,
)
return creators
@v1_router.get(
path="/store/creators/{username}",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.CreatorDetails,
)
async def get_store_creator(
username: str,
) -> store_model.CreatorDetails:
"""
Get details of a specific store creator by username.
Args:
username: Creator's username
Returns:
CreatorDetails: Detailed information about the creator
"""
username = urllib.parse.unquote(username).lower()
creator = await store_cache._get_cached_creator_details(username=username)
return creator
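
Taken together, the graph endpoints above support a simple execute-then-poll loop from an external client. A sketch (base URL and the set of terminal statuses are assumptions; the graph ID and inputs reuse the Email Address Finder agent from earlier in this diff):

import time
import httpx

BASE = "https://api.example.com/api"     # assumed mount point
HEADERS = {"X-API-Key": "your-api-key"}  # key needs EXECUTE_GRAPH and READ_GRAPH

graph_id, version = "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26", 29

with httpx.Client(headers=HEADERS, timeout=30) as client:
    # node_input is embedded, so the request body wraps it in a "node_input" key
    exec_id = client.post(
        f"{BASE}/graphs/{graph_id}/execute/{version}",
        json={"node_input": {"Business Name": "Tim Cook", "Address": "USA"}},
    ).json()["id"]

    while True:
        result = client.get(
            f"{BASE}/graphs/{graph_id}/executions/{exec_id}/results"
        ).json()
        if result["status"] in ("COMPLETED", "FAILED", "TERMINATED"):  # assumed terminal set
            break
        time.sleep(2)

    print(result["output"])  # populated only when status == COMPLETED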

View File

@@ -1,152 +0,0 @@
"""External API routes for chat tools - stateless HTTP endpoints.
Note: These endpoints use ephemeral sessions that are not persisted to Redis.
As a result, session-based rate limiting (max_agent_runs, max_agent_schedules)
is not enforced for external API calls. Each request creates a fresh session
with zeroed counters. Rate limiting for external API consumers should be
handled separately (e.g., via API key quotas).
"""
import logging
from typing import Any
from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.api.external.middleware import require_permission
from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools import find_agent_tool, run_agent_tool
from backend.api.features.chat.tools.models import ToolResponseBase
from backend.data.auth.base import APIAuthorizationInfo
logger = logging.getLogger(__name__)
tools_router = APIRouter(prefix="/tools", tags=["tools"])
# Note: We use Security() as a function parameter dependency (auth: APIAuthorizationInfo = Security(...))
# rather than in the decorator's dependencies= list. This avoids duplicate permission checks
# while still enforcing auth AND giving us access to auth for extracting user_id.
# Request models
class FindAgentRequest(BaseModel):
query: str = Field(..., description="Search query for finding agents")
class RunAgentRequest(BaseModel):
"""Request to run or schedule an agent.
The tool automatically handles the setup flow:
- First call returns available inputs so user can decide what values to use
- Returns missing credentials if user needs to configure them
- Executes when inputs are provided OR use_defaults=true
- Schedules execution if schedule_name and cron are provided
"""
username_agent_slug: str = Field(
...,
description="The marketplace agent slug (e.g., 'username/agent-name')",
)
inputs: dict[str, Any] = Field(
default_factory=dict,
description="Dictionary of input values for the agent",
)
use_defaults: bool = Field(
default=False,
description="Set to true to run with default values (user must confirm)",
)
schedule_name: str | None = Field(
None,
description="Name for scheduled execution (triggers scheduling mode)",
)
cron: str | None = Field(
None,
description="Cron expression (5 fields: minute hour day month weekday)",
)
timezone: str = Field(
default="UTC",
description="IANA timezone (e.g., 'America/New_York', 'UTC')",
)
def _create_ephemeral_session(user_id: str) -> ChatSession:
"""Create an ephemeral session for stateless API requests."""
return ChatSession.new(user_id)
@tools_router.post(
path="/find-agent",
)
async def find_agent(
request: FindAgentRequest,
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.USE_TOOLS)
),
) -> dict[str, Any]:
"""
Search for agents in the marketplace based on capabilities and user needs.
Args:
request: Search query for finding agents
Returns:
List of matching agents or no results response
"""
session = _create_ephemeral_session(auth.user_id)
result = await find_agent_tool._execute(
user_id=auth.user_id,
session=session,
query=request.query,
)
return _response_to_dict(result)
@tools_router.post(
path="/run-agent",
)
async def run_agent(
request: RunAgentRequest,
auth: APIAuthorizationInfo = Security(
require_permission(APIKeyPermission.USE_TOOLS)
),
) -> dict[str, Any]:
"""
Run or schedule an agent from the marketplace.
The endpoint automatically handles the setup flow:
- Returns missing inputs if required fields are not provided
- Returns missing credentials if user needs to configure them
- Executes immediately if all requirements are met
- Schedules execution if schedule_name and cron are provided
For scheduled execution:
- Cron format: "minute hour day month weekday"
- Examples: "0 9 * * 1-5" (9am weekdays), "0 0 * * *" (daily at midnight)
- Timezone: Use IANA timezone names like "America/New_York"
Args:
request: Agent slug, inputs, and optional schedule config
Returns:
- setup_requirements: If inputs or credentials are missing
- execution_started: If agent was run or scheduled successfully
- error: If something went wrong
"""
session = _create_ephemeral_session(auth.user_id)
result = await run_agent_tool._execute(
user_id=auth.user_id,
session=session,
username_agent_slug=request.username_agent_slug,
inputs=request.inputs,
use_defaults=request.use_defaults,
schedule_name=request.schedule_name or "",
cron=request.cron or "",
timezone=request.timezone,
)
return _response_to_dict(result)
def _response_to_dict(result: ToolResponseBase) -> dict[str, Any]:
"""Convert a tool response to a dictionary for JSON serialization."""
return result.model_dump()
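
A run-agent call typically takes two rounds: the first response surfaces setup requirements (missing inputs or credentials), and a follow-up with the filled-in values starts or schedules the run. A sketch of such a follow-up call (base URL and agent slug are assumptions):

import httpx

resp = httpx.post(
    "https://api.example.com/api/tools/run-agent",  # assumed mount point
    headers={"X-API-Key": "your-api-key"},          # key needs USE_TOOLS
    json={
        "username_agent_slug": "someuser/email-address-finder",  # hypothetical slug
        "inputs": {"Business Name": "Tim Cook", "Address": "USA"},
        # To schedule instead of running immediately:
        # "schedule_name": "daily-lookup", "cron": "0 9 * * 1-5", "timezone": "UTC",
    },
    timeout=30,
)
print(resp.json())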

View File

@@ -1,340 +0,0 @@
"""Tests for analytics API endpoints."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
from .analytics import router as analytics_router
app = fastapi.FastAPI()
app.include_router(analytics_router)
client = fastapi.testclient.TestClient(app)
@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
"""Setup auth overrides for all tests in this module."""
from autogpt_libs.auth.jwt_utils import get_jwt_payload
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
yield
app.dependency_overrides.clear()
# =============================================================================
# /log_raw_metric endpoint tests
# =============================================================================
def test_log_raw_metric_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test successful raw metric logging."""
mock_result = Mock(id="metric-123-uuid")
mock_log_metric = mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": "page_load_time",
"metric_value": 2.5,
"data_string": "/dashboard",
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 200, f"Unexpected response: {response.text}"
assert response.json() == "metric-123-uuid"
mock_log_metric.assert_called_once_with(
user_id=test_user_id,
metric_name="page_load_time",
metric_value=2.5,
data_string="/dashboard",
)
configured_snapshot.assert_match(
json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
"analytics_log_metric_success",
)
@pytest.mark.parametrize(
"metric_value,metric_name,data_string,test_id",
[
(100, "api_calls_count", "external_api", "integer_value"),
(0, "error_count", "no_errors", "zero_value"),
(-5.2, "temperature_delta", "cooling", "negative_value"),
(1.23456789, "precision_test", "float_precision", "float_precision"),
(999999999, "large_number", "max_value", "large_number"),
(0.0000001, "tiny_number", "min_value", "tiny_number"),
],
)
def test_log_raw_metric_various_values(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
metric_value: float,
metric_name: str,
data_string: str,
test_id: str,
) -> None:
"""Test raw metric logging with various metric values."""
mock_result = Mock(id=f"metric-{test_id}-uuid")
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": metric_name,
"metric_value": metric_value,
"data_string": data_string,
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
configured_snapshot.assert_match(
json.dumps(
{"metric_id": response.json(), "test_case": test_id},
indent=2,
sort_keys=True,
),
f"analytics_metric_{test_id}",
)
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"),
({"metric_name": "test"}, "Field required"),
(
{"metric_name": "test", "metric_value": "not_a_number", "data_string": "x"},
"Input should be a valid number",
),
(
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
"String should have at least 1 character",
),
(
{"metric_name": "test", "metric_value": 1.0, "data_string": ""},
"String should have at least 1 character",
),
],
ids=[
"empty_request",
"missing_metric_value_and_data_string",
"invalid_metric_value_type",
"empty_metric_name",
"empty_data_string",
],
)
def test_log_raw_metric_validation_errors(
invalid_data: dict,
expected_error: str,
) -> None:
"""Test validation errors for invalid metric requests."""
response = client.post("/log_raw_metric", json=invalid_data)
assert response.status_code == 422
error_detail = response.json()
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
error_text = json.dumps(error_detail)
assert (
expected_error in error_text
), f"Expected '{expected_error}' in error response: {error_text}"
def test_log_raw_metric_service_error(
mocker: pytest_mock.MockFixture,
test_user_id: str,
) -> None:
"""Test error handling when analytics service fails."""
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
side_effect=Exception("Database connection failed"),
)
request_data = {
"metric_name": "test_metric",
"metric_value": 1.0,
"data_string": "test",
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 500
error_detail = response.json()["detail"]
assert "Database connection failed" in error_detail["message"]
assert "hint" in error_detail
# =============================================================================
# /log_raw_analytics endpoint tests
# =============================================================================
def test_log_raw_analytics_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test successful raw analytics logging."""
mock_result = Mock(id="analytics-789-uuid")
mock_log_analytics = mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"type": "user_action",
"data": {
"action": "button_click",
"button_id": "submit_form",
"timestamp": "2023-01-01T00:00:00Z",
"metadata": {"form_type": "registration", "fields_filled": 5},
},
"data_index": "button_click_submit_form",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 200, f"Unexpected response: {response.text}"
assert response.json() == "analytics-789-uuid"
mock_log_analytics.assert_called_once_with(
test_user_id,
"user_action",
request_data["data"],
"button_click_submit_form",
)
configured_snapshot.assert_match(
json.dumps({"analytics_id": response.json()}, indent=2, sort_keys=True),
"analytics_log_analytics_success",
)
def test_log_raw_analytics_complex_data(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test raw analytics logging with complex nested data structures."""
mock_result = Mock(id="analytics-complex-uuid")
mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"type": "agent_execution",
"data": {
"agent_id": "agent_123",
"execution_id": "exec_456",
"status": "completed",
"duration_ms": 3500,
"nodes_executed": 15,
"blocks_used": [
{"block_id": "llm_block", "count": 3},
{"block_id": "http_block", "count": 5},
{"block_id": "code_block", "count": 2},
],
"errors": [],
"metadata": {
"trigger": "manual",
"user_tier": "premium",
"environment": "production",
},
},
"data_index": "agent_123_exec_456",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 200
configured_snapshot.assert_match(
json.dumps(
{"analytics_id": response.json(), "logged_data": request_data["data"]},
indent=2,
sort_keys=True,
),
"analytics_log_analytics_complex_data",
)
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"),
({"type": "test"}, "Field required"),
(
{"type": "test", "data": "not_a_dict", "data_index": "test"},
"Input should be a valid dictionary",
),
({"type": "test", "data": {"key": "value"}}, "Field required"),
],
ids=[
"empty_request",
"missing_data_and_data_index",
"invalid_data_type",
"missing_data_index",
],
)
def test_log_raw_analytics_validation_errors(
invalid_data: dict,
expected_error: str,
) -> None:
"""Test validation errors for invalid analytics requests."""
response = client.post("/log_raw_analytics", json=invalid_data)
assert response.status_code == 422
error_detail = response.json()
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
error_text = json.dumps(error_detail)
assert (
expected_error in error_text
), f"Expected '{expected_error}' in error response: {error_text}"
def test_log_raw_analytics_service_error(
mocker: pytest_mock.MockFixture,
test_user_id: str,
) -> None:
"""Test error handling when analytics service fails."""
mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
side_effect=Exception("Analytics DB unreachable"),
)
request_data = {
"type": "test_event",
"data": {"key": "value"},
"data_index": "test_index",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 500
error_detail = response.json()["detail"]
assert "Analytics DB unreachable" in error_detail["message"]
assert "hint" in error_detail

View File

@@ -1,689 +0,0 @@
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from difflib import SequenceMatcher
from typing import Sequence
import prisma
import backend.api.features.library.db as library_db
import backend.api.features.library.model as library_model
import backend.api.features.store.db as store_db
import backend.api.features.store.model as store_model
import backend.data.block
from backend.blocks import load_all_blocks
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.util.cache import cached
from backend.util.models import Pagination
from .model import (
BlockCategoryResponse,
BlockResponse,
BlockType,
CountResponse,
FilterType,
Provider,
ProviderResponse,
SearchEntry,
)
logger = logging.getLogger(__name__)
llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0
SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent
@dataclass
class _ScoredItem:
item: SearchResultItem
filter_type: FilterType
score: float
sort_key: str
@dataclass
class _SearchCacheEntry:
items: list[SearchResultItem]
total_items: dict[FilterType, int]
def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]:
    """Group enabled blocks by category, with up to `category_blocks` sample blocks each."""
    categories: dict[BlockCategory, BlockCategoryResponse] = {}
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
# Skip disabled blocks
if block.disabled:
continue
# Skip blocks that don't have categories (all should have at least one)
if not block.categories:
continue
# Add block to the categories
for category in block.categories:
if category not in categories:
categories[category] = BlockCategoryResponse(
name=category.name.lower(),
total_blocks=0,
blocks=[],
)
categories[category].total_blocks += 1
# Append if the category has less than the specified number of blocks
if len(categories[category].blocks) < category_blocks:
categories[category].blocks.append(block.get_info())
# Sort categories by name
return sorted(categories.values(), key=lambda x: x.name)
def get_blocks(
*,
category: str | None = None,
type: BlockType | None = None,
provider: ProviderName | None = None,
page: int = 1,
page_size: int = 50,
) -> BlockResponse:
"""
    Get blocks filtered by category, type, or provider (at most one filter).
    Providing no filter fetches all blocks.
"""
# Only one of category, type, or provider can be specified
if (category and type) or (category and provider) or (type and provider):
raise ValueError("Only one of category, type, or provider can be specified")
blocks: list[AnyBlockSchema] = []
skip = (page - 1) * page_size
take = page_size
total = 0
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
# Skip disabled blocks
if block.disabled:
continue
# Skip blocks that don't match the category
if category and category not in {c.name.lower() for c in block.categories}:
continue
# Skip blocks that don't match the type
if (
(type == "input" and block.block_type.value != "Input")
or (type == "output" and block.block_type.value != "Output")
or (type == "action" and block.block_type.value in ("Input", "Output"))
):
continue
# Skip blocks that don't match the provider
if provider:
credentials_info = block.input_schema.get_credentials_fields_info().values()
if not any(provider in info.provider for info in credentials_info):
continue
total += 1
if skip > 0:
skip -= 1
continue
if take > 0:
take -= 1
blocks.append(block)
return BlockResponse(
blocks=[b.get_info() for b in blocks],
pagination=Pagination(
total_items=total,
total_pages=(total + page_size - 1) // page_size,
current_page=page,
page_size=page_size,
),
)
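# Illustrative usage of the manual skip/take pagination above (a sketch, not
# part of the original module); the three filters are mutually exclusive:
#   get_blocks(category="ai", page=2, page_size=10)  # matches 11-20 in "ai"
#   get_blocks(type="input")                         # all Input blocks
#   get_blocks(category="ai", type="input")          # raises ValueError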
def get_block_by_id(block_id: str) -> BlockInfo | None:
"""
Get a specific block by its ID.
"""
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
if block.id == block_id:
return block.get_info()
return None
async def update_search(user_id: str, search: SearchEntry) -> str:
"""
Upsert a search request for the user and return the search ID.
"""
if search.search_id:
# Update existing search
await prisma.models.BuilderSearchHistory.prisma().update(
where={
"id": search.search_id,
},
data={
"searchQuery": search.search_query or "",
"filter": search.filter or [], # type: ignore
"byCreator": search.by_creator or [],
},
)
return search.search_id
else:
# Create new search
new_search = await prisma.models.BuilderSearchHistory.prisma().create(
data={
"userId": user_id,
"searchQuery": search.search_query or "",
"filter": search.filter or [], # type: ignore
"byCreator": search.by_creator or [],
}
)
return new_search.id
async def get_recent_searches(user_id: str, limit: int = 5) -> list[SearchEntry]:
"""
Get the user's most recent search requests.
"""
searches = await prisma.models.BuilderSearchHistory.prisma().find_many(
where={
"userId": user_id,
},
order={
"updatedAt": "desc",
},
take=limit,
)
return [
SearchEntry(
search_query=s.searchQuery,
filter=s.filter, # type: ignore
by_creator=s.byCreator,
search_id=s.id,
)
for s in searches
]
async def get_sorted_search_results(
*,
user_id: str,
search_query: str | None,
filters: Sequence[FilterType],
by_creator: Sequence[str] | None = None,
) -> _SearchCacheEntry:
    """Normalize arguments and return cached, score-sorted search results."""
normalized_filters: tuple[FilterType, ...] = tuple(sorted(set(filters or [])))
normalized_creators: tuple[str, ...] = tuple(sorted(set(by_creator or [])))
return await _build_cached_search_results(
user_id=user_id,
search_query=search_query or "",
filters=normalized_filters,
by_creator=normalized_creators,
)
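# Illustrative note (an assumption about intent, not original code): sorting
# filters/creators into tuples makes the arguments hashable and order-
# insensitive, so logically identical searches share one shared-cache entry:
#   await get_sorted_search_results(user_id=u, search_query="x",
#                                   filters=["blocks", "my_agents"])
#   await get_sorted_search_results(user_id=u, search_query="x",
#                                   filters=["my_agents", "blocks"])  # cache hit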
@cached(ttl_seconds=300, shared_cache=True)
async def _build_cached_search_results(
user_id: str,
search_query: str,
filters: tuple[FilterType, ...],
by_creator: tuple[str, ...],
) -> _SearchCacheEntry:
normalized_query = (search_query or "").strip().lower()
include_blocks = "blocks" in filters
include_integrations = "integrations" in filters
include_library_agents = "my_agents" in filters
include_marketplace_agents = "marketplace_agents" in filters
scored_items: list[_ScoredItem] = []
total_items: dict[FilterType, int] = {
"blocks": 0,
"integrations": 0,
"marketplace_agents": 0,
"my_agents": 0,
}
block_results, block_total, integration_total = _collect_block_results(
normalized_query=normalized_query,
include_blocks=include_blocks,
include_integrations=include_integrations,
)
scored_items.extend(block_results)
total_items["blocks"] = block_total
total_items["integrations"] = integration_total
if include_library_agents:
library_response = await library_db.list_library_agents(
user_id=user_id,
search_term=search_query or None,
page=1,
page_size=MAX_LIBRARY_AGENT_RESULTS,
)
total_items["my_agents"] = library_response.pagination.total_items
scored_items.extend(
_build_library_items(
agents=library_response.agents,
normalized_query=normalized_query,
)
)
if include_marketplace_agents:
marketplace_response = await store_db.get_store_agents(
creators=list(by_creator) or None,
search_query=search_query or None,
page=1,
page_size=MAX_MARKETPLACE_AGENT_RESULTS,
)
total_items["marketplace_agents"] = marketplace_response.pagination.total_items
scored_items.extend(
_build_marketplace_items(
agents=marketplace_response.agents,
normalized_query=normalized_query,
)
)
sorted_items = sorted(
scored_items,
key=lambda entry: (-entry.score, entry.sort_key, entry.filter_type),
)
return _SearchCacheEntry(
items=[entry.item for entry in sorted_items],
total_items=total_items,
)
def _collect_block_results(
*,
normalized_query: str,
include_blocks: bool,
include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
results: list[_ScoredItem] = []
block_count = 0
integration_count = 0
if not include_blocks and not include_integrations:
return results, block_count, integration_count
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
if block.disabled:
continue
block_info = block.get_info()
credentials = list(block.input_schema.get_credentials_fields().values())
is_integration = len(credentials) > 0
if is_integration and not include_integrations:
continue
if not is_integration and not include_blocks:
continue
score = _score_block(block, block_info, normalized_query)
if not _should_include_item(score, normalized_query):
continue
filter_type: FilterType = "integrations" if is_integration else "blocks"
if is_integration:
integration_count += 1
else:
block_count += 1
results.append(
_ScoredItem(
item=block_info,
filter_type=filter_type,
score=score,
sort_key=_get_item_name(block_info),
)
)
return results, block_count, integration_count
def _build_library_items(
*,
agents: list[library_model.LibraryAgent],
normalized_query: str,
) -> list[_ScoredItem]:
results: list[_ScoredItem] = []
for agent in agents:
score = _score_library_agent(agent, normalized_query)
if not _should_include_item(score, normalized_query):
continue
results.append(
_ScoredItem(
item=agent,
filter_type="my_agents",
score=score,
sort_key=_get_item_name(agent),
)
)
return results
def _build_marketplace_items(
*,
agents: list[store_model.StoreAgent],
normalized_query: str,
) -> list[_ScoredItem]:
results: list[_ScoredItem] = []
for agent in agents:
score = _score_store_agent(agent, normalized_query)
if not _should_include_item(score, normalized_query):
continue
results.append(
_ScoredItem(
item=agent,
filter_type="marketplace_agents",
score=score,
sort_key=_get_item_name(agent),
)
)
return results
def get_providers(
    query: str = "",
    page: int = 1,
    page_size: int = 50,
) -> ProviderResponse:
    """List credential providers matching `query`, paginated with skip/take."""
    providers = []
    query = query.lower()
    skip = (page - 1) * page_size
    take = page_size
    all_providers = _get_all_providers()
    total = 0
    for provider in all_providers.values():
        if (
            query not in provider.name.value.lower()
            and query not in provider.description.lower()
        ):
            continue
        # Count every match (not every provider) so pagination reflects the
        # filtered result set, mirroring the skip/take pattern in get_blocks.
        total += 1
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            providers.append(provider)
return ProviderResponse(
providers=providers,
pagination=Pagination(
total_items=total,
total_pages=(total + page_size - 1) // page_size,
current_page=page,
page_size=page_size,
),
)
async def get_counts(user_id: str) -> CountResponse:
my_agents = await prisma.models.LibraryAgent.prisma().count(
where={
"userId": user_id,
"isDeleted": False,
"isArchived": False,
}
)
counts = await _get_static_counts()
return CountResponse(
my_agents=my_agents,
**counts,
)
@cached(ttl_seconds=3600)
async def _get_static_counts():
"""
Get counts of blocks, integrations, and marketplace agents.
This is cached to avoid unnecessary database queries and calculations.
"""
all_blocks = 0
input_blocks = 0
action_blocks = 0
output_blocks = 0
integrations = 0
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
if block.disabled:
continue
all_blocks += 1
if block.block_type.value == "Input":
input_blocks += 1
elif block.block_type.value == "Output":
output_blocks += 1
else:
action_blocks += 1
credentials = list(block.input_schema.get_credentials_fields().values())
if len(credentials) > 0:
integrations += 1
marketplace_agents = await prisma.models.StoreAgent.prisma().count()
return {
"all_blocks": all_blocks,
"input_blocks": input_blocks,
"action_blocks": action_blocks,
"output_blocks": output_blocks,
"integrations": integrations,
"marketplace_agents": marketplace_agents,
}
def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    # True when the schema exposes an LlmModel field AND the query matches one
    # of the known model names; the field's presence only gates the name check.
    has_llm_field = any(
        field.annotation == LlmModel for field in schema_cls.model_fields.values()
    )
    return has_llm_field and any(query in name for name in llm_models)
def _score_block(
block: AnyBlockSchema,
block_info: BlockInfo,
normalized_query: str,
) -> float:
if not normalized_query:
return 0.0
name = block_info.name.lower()
description = block_info.description.lower()
score = _score_primary_fields(name, description, normalized_query)
category_text = " ".join(
category.get("category", "").lower() for category in block_info.categories
)
score += _score_additional_field(category_text, normalized_query, 12, 6)
credentials_info = block.input_schema.get_credentials_fields_info().values()
provider_names = [
provider.value.lower()
for info in credentials_info
for provider in info.provider
]
provider_text = " ".join(provider_names)
score += _score_additional_field(provider_text, normalized_query, 15, 6)
if _matches_llm_model(block.input_schema, normalized_query):
score += 20
return score
def _score_library_agent(
agent: library_model.LibraryAgent,
normalized_query: str,
) -> float:
if not normalized_query:
return 0.0
name = agent.name.lower()
description = (agent.description or "").lower()
instructions = (agent.instructions or "").lower()
score = _score_primary_fields(name, description, normalized_query)
score += _score_additional_field(instructions, normalized_query, 15, 6)
score += _score_additional_field(
agent.creator_name.lower(), normalized_query, 10, 5
)
return score
def _score_store_agent(
agent: store_model.StoreAgent,
normalized_query: str,
) -> float:
if not normalized_query:
return 0.0
name = agent.agent_name.lower()
description = agent.description.lower()
sub_heading = agent.sub_heading.lower()
score = _score_primary_fields(name, description, normalized_query)
score += _score_additional_field(sub_heading, normalized_query, 12, 6)
score += _score_additional_field(agent.creator.lower(), normalized_query, 10, 5)
return score
def _score_primary_fields(name: str, description: str, query: str) -> float:
score = 0.0
if name == query:
score += 120
elif name.startswith(query):
score += 90
elif query in name:
score += 60
score += SequenceMatcher(None, name, query).ratio() * 50
if description:
if query in description:
score += 30
score += SequenceMatcher(None, description, query).ratio() * 25
return score
def _score_additional_field(
value: str,
query: str,
contains_weight: float,
similarity_weight: float,
) -> float:
if not value or not query:
return 0.0
score = 0.0
if query in value:
score += contains_weight
score += SequenceMatcher(None, value, query).ratio() * similarity_weight
return score
def _should_include_item(score: float, normalized_query: str) -> bool:
if not normalized_query:
return True
return score >= MIN_SCORE_FOR_FILTERED_RESULTS
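# Illustrative sketch (not part of the original module): the relative ordering
# produced by the scoring helpers above. Absolute values depend on
# difflib.SequenceMatcher, so only the ordering is asserted.
def _demo_scoring() -> None:
    exact = _score_primary_fields("get weather", "fetch forecasts", "get weather")
    prefix = _score_primary_fields("get weather", "fetch forecasts", "get")
    fuzzy = _score_primary_fields("get weather", "fetch forecasts", "weathr")
    # An exact name match (120) outranks a prefix match (90), which outranks a
    # purely fuzzy similarity score.
    assert exact > prefix > fuzzy
    assert _should_include_item(exact, "get weather")  # well above the 10.0 floor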
def _get_item_name(item: SearchResultItem) -> str:
if isinstance(item, BlockInfo):
return item.name.lower()
if isinstance(item, library_model.LibraryAgent):
return item.name.lower()
return item.agent_name.lower()
@cached(ttl_seconds=3600)
def _get_all_providers() -> dict[ProviderName, Provider]:
providers: dict[ProviderName, Provider] = {}
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
if block.disabled:
continue
credentials_info = block.input_schema.get_credentials_fields_info().values()
for info in credentials_info:
for provider in info.provider: # provider is a ProviderName enum member
if provider in providers:
providers[provider].integration_count += 1
else:
providers[provider] = Provider(
name=provider, description="", integration_count=1
)
return providers
@cached(ttl_seconds=3600)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
suggested_blocks = []
# Sum the number of executions for each block type
# Prisma cannot group by nested relations, so we do a raw query
# Calculate the cutoff timestamp
timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)
results = await query_raw_with_schema(
"""
SELECT
agent_node."agentBlockId" AS block_id,
COUNT(execution.id) AS execution_count
FROM {schema_prefix}"AgentNodeExecution" execution
JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= $1::timestamp
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;
""",
timestamp_threshold,
)
# Get the top blocks based on execution count
# But ignore Input and Output blocks
blocks: list[tuple[BlockInfo, int]] = []
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
if block.disabled or block.block_type in (
backend.data.block.BlockType.INPUT,
backend.data.block.BlockType.OUTPUT,
backend.data.block.BlockType.AGENT,
):
continue
# Find the execution count for this block
execution_count = next(
(row["execution_count"] for row in results if row["block_id"] == block.id),
0,
)
blocks.append((block.get_info(), execution_count))
# Sort blocks by execution count
blocks.sort(key=lambda x: x[1], reverse=True)
suggested_blocks = [block[0] for block in blocks]
# Return the top blocks
return suggested_blocks[:count]

View File

@@ -1,249 +0,0 @@
"""Database operations for chat sessions."""
import asyncio
import logging
from datetime import UTC, datetime
from typing import Any, cast
from prisma.models import ChatMessage as PrismaChatMessage
from prisma.models import ChatSession as PrismaChatSession
from prisma.types import (
ChatMessageCreateInput,
ChatSessionCreateInput,
ChatSessionUpdateInput,
ChatSessionWhereInput,
)
from backend.data.db import transaction
from backend.util.json import SafeJson
logger = logging.getLogger(__name__)
async def get_chat_session(session_id: str) -> PrismaChatSession | None:
"""Get a chat session by ID from the database."""
session = await PrismaChatSession.prisma().find_unique(
where={"id": session_id},
include={"Messages": True},
)
if session and session.Messages:
# Sort messages by sequence in Python - Prisma Python client doesn't support
# order_by in include clauses (unlike Prisma JS), so we sort after fetching
session.Messages.sort(key=lambda m: m.sequence)
return session
async def create_chat_session(
session_id: str,
user_id: str,
) -> PrismaChatSession:
"""Create a new chat session in the database."""
data = ChatSessionCreateInput(
id=session_id,
userId=user_id,
credentials=SafeJson({}),
successfulAgentRuns=SafeJson({}),
successfulAgentSchedules=SafeJson({}),
)
return await PrismaChatSession.prisma().create(
data=data,
include={"Messages": True},
)
async def update_chat_session(
session_id: str,
credentials: dict[str, Any] | None = None,
successful_agent_runs: dict[str, Any] | None = None,
successful_agent_schedules: dict[str, Any] | None = None,
total_prompt_tokens: int | None = None,
total_completion_tokens: int | None = None,
title: str | None = None,
) -> PrismaChatSession | None:
"""Update a chat session's metadata."""
data: ChatSessionUpdateInput = {"updatedAt": datetime.now(UTC)}
if credentials is not None:
data["credentials"] = SafeJson(credentials)
if successful_agent_runs is not None:
data["successfulAgentRuns"] = SafeJson(successful_agent_runs)
if successful_agent_schedules is not None:
data["successfulAgentSchedules"] = SafeJson(successful_agent_schedules)
if total_prompt_tokens is not None:
data["totalPromptTokens"] = total_prompt_tokens
if total_completion_tokens is not None:
data["totalCompletionTokens"] = total_completion_tokens
if title is not None:
data["title"] = title
session = await PrismaChatSession.prisma().update(
where={"id": session_id},
data=data,
include={"Messages": True},
)
if session and session.Messages:
# Sort in Python - Prisma Python doesn't support order_by in include clauses
session.Messages.sort(key=lambda m: m.sequence)
return session
async def add_chat_message(
session_id: str,
role: str,
sequence: int,
content: str | None = None,
name: str | None = None,
tool_call_id: str | None = None,
refusal: str | None = None,
tool_calls: list[dict[str, Any]] | None = None,
function_call: dict[str, Any] | None = None,
) -> PrismaChatMessage:
"""Add a message to a chat session."""
# Build input dict dynamically rather than using ChatMessageCreateInput directly
# because Prisma's TypedDict validation rejects optional fields set to None.
# We only include fields that have values, then cast at the end.
data: dict[str, Any] = {
"Session": {"connect": {"id": session_id}},
"role": role,
"sequence": sequence,
}
# Add optional string fields
if content is not None:
data["content"] = content
if name is not None:
data["name"] = name
if tool_call_id is not None:
data["toolCallId"] = tool_call_id
if refusal is not None:
data["refusal"] = refusal
# Add optional JSON fields only when they have values
if tool_calls is not None:
data["toolCalls"] = SafeJson(tool_calls)
if function_call is not None:
data["functionCall"] = SafeJson(function_call)
# Run message create and session timestamp update in parallel for lower latency
_, message = await asyncio.gather(
PrismaChatSession.prisma().update(
where={"id": session_id},
data={"updatedAt": datetime.now(UTC)},
),
PrismaChatMessage.prisma().create(data=cast(ChatMessageCreateInput, data)),
)
return message
async def add_chat_messages_batch(
session_id: str,
messages: list[dict[str, Any]],
start_sequence: int,
) -> list[PrismaChatMessage]:
"""Add multiple messages to a chat session in a batch.
Uses a transaction for atomicity - if any message creation fails,
the entire batch is rolled back.
"""
if not messages:
return []
created_messages = []
async with transaction() as tx:
for i, msg in enumerate(messages):
# Build input dict dynamically rather than using ChatMessageCreateInput
# directly because Prisma's TypedDict validation rejects optional fields
# set to None. We only include fields that have values, then cast.
data: dict[str, Any] = {
"Session": {"connect": {"id": session_id}},
"role": msg["role"],
"sequence": start_sequence + i,
}
# Add optional string fields
if msg.get("content") is not None:
data["content"] = msg["content"]
if msg.get("name") is not None:
data["name"] = msg["name"]
if msg.get("tool_call_id") is not None:
data["toolCallId"] = msg["tool_call_id"]
if msg.get("refusal") is not None:
data["refusal"] = msg["refusal"]
# Add optional JSON fields only when they have values
if msg.get("tool_calls") is not None:
data["toolCalls"] = SafeJson(msg["tool_calls"])
if msg.get("function_call") is not None:
data["functionCall"] = SafeJson(msg["function_call"])
created = await PrismaChatMessage.prisma(tx).create(
data=cast(ChatMessageCreateInput, data)
)
created_messages.append(created)
# Update session's updatedAt timestamp within the same transaction.
# Note: Token usage (total_prompt_tokens, total_completion_tokens) is updated
# separately via update_chat_session() after streaming completes.
await PrismaChatSession.prisma(tx).update(
where={"id": session_id},
data={"updatedAt": datetime.now(UTC)},
)
return created_messages
async def get_user_chat_sessions(
user_id: str,
limit: int = 50,
offset: int = 0,
) -> list[PrismaChatSession]:
"""Get chat sessions for a user, ordered by most recent."""
return await PrismaChatSession.prisma().find_many(
where={"userId": user_id},
order={"updatedAt": "desc"},
take=limit,
skip=offset,
)
async def get_user_session_count(user_id: str) -> int:
"""Get the total number of chat sessions for a user."""
return await PrismaChatSession.prisma().count(where={"userId": user_id})
async def delete_chat_session(session_id: str, user_id: str | None = None) -> bool:
"""Delete a chat session and all its messages.
Args:
session_id: The session ID to delete.
user_id: If provided, validates that the session belongs to this user
before deletion. This prevents unauthorized deletion of other
users' sessions.
Returns:
True if deleted successfully, False otherwise.
"""
try:
# Build typed where clause with optional user_id validation
where_clause: ChatSessionWhereInput = {"id": session_id}
if user_id is not None:
where_clause["userId"] = user_id
result = await PrismaChatSession.prisma().delete_many(where=where_clause)
if result == 0:
logger.warning(
f"No session deleted for {session_id} "
f"(user_id validation: {user_id is not None})"
)
return False
return True
except Exception as e:
logger.error(f"Failed to delete chat session {session_id}: {e}")
return False
async def get_chat_session_message_count(session_id: str) -> int:
"""Get the number of messages in a chat session."""
count = await PrismaChatMessage.prisma().count(where={"sessionId": session_id})
return count
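# --- Illustrative sketch (not part of this module) ---
# How a caller might append a user/assistant exchange atomically, continuing
# the sequence numbering from the current message count. A real caller would
# hold the service layer's per-session lock to avoid racing on the count.
async def _demo_append_exchange(session_id: str, user_text: str, reply: str) -> None:
    start = await get_chat_session_message_count(session_id)
    await add_chat_messages_batch(
        session_id=session_id,
        messages=[
            {"role": "user", "content": user_text},
            {"role": "assistant", "content": reply},
        ],
        start_sequence=start,
    )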

View File

@@ -1,597 +0,0 @@
import asyncio
import logging
import uuid
from datetime import UTC, datetime
from typing import Any
from weakref import WeakValueDictionary
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionDeveloperMessageParam,
ChatCompletionFunctionMessageParam,
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_assistant_message_param import FunctionCall
from openai.types.chat.chat_completion_message_tool_call_param import (
ChatCompletionMessageToolCallParam,
Function,
)
from prisma.models import ChatMessage as PrismaChatMessage
from prisma.models import ChatSession as PrismaChatSession
from pydantic import BaseModel
from backend.data.redis_client import get_redis_async
from backend.util import json
from backend.util.exceptions import DatabaseError, RedisError
from . import db as chat_db
from .config import ChatConfig
logger = logging.getLogger(__name__)
config = ChatConfig()
def _parse_json_field(value: str | dict | list | None, default: Any = None) -> Any:
"""Parse a JSON field that may be stored as string or already parsed."""
if value is None:
return default
if isinstance(value, str):
return json.loads(value)
return value
# Redis cache key prefix for chat sessions
CHAT_SESSION_CACHE_PREFIX = "chat:session:"
def _get_session_cache_key(session_id: str) -> str:
"""Get the Redis cache key for a chat session."""
return f"{CHAT_SESSION_CACHE_PREFIX}{session_id}"
# Session-level locks to prevent race conditions during concurrent upserts.
# Uses WeakValueDictionary to automatically garbage collect locks when no longer referenced,
# preventing unbounded memory growth while maintaining lock semantics for active sessions.
# Invalidation: Locks are auto-removed by GC when no coroutine holds a reference (after
# async with lock: completes). Explicit cleanup also occurs in delete_chat_session().
_session_locks: WeakValueDictionary[str, asyncio.Lock] = WeakValueDictionary()
_session_locks_mutex = asyncio.Lock()
async def _get_session_lock(session_id: str) -> asyncio.Lock:
"""Get or create a lock for a specific session to prevent concurrent upserts.
Uses WeakValueDictionary for automatic cleanup: locks are garbage collected
when no coroutine holds a reference to them, preventing memory leaks from
unbounded growth of session locks.
"""
async with _session_locks_mutex:
lock = _session_locks.get(session_id)
if lock is None:
lock = asyncio.Lock()
_session_locks[session_id] = lock
return lock
class ChatMessage(BaseModel):
role: str
content: str | None = None
name: str | None = None
tool_call_id: str | None = None
refusal: str | None = None
tool_calls: list[dict] | None = None
function_call: dict | None = None
class Usage(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class ChatSession(BaseModel):
session_id: str
user_id: str
title: str | None = None
messages: list[ChatMessage]
usage: list[Usage]
credentials: dict[str, dict] = {} # Map of provider -> credential metadata
started_at: datetime
updated_at: datetime
successful_agent_runs: dict[str, int] = {}
successful_agent_schedules: dict[str, int] = {}
@staticmethod
def new(user_id: str) -> "ChatSession":
return ChatSession(
session_id=str(uuid.uuid4()),
user_id=user_id,
title=None,
messages=[],
usage=[],
credentials={},
started_at=datetime.now(UTC),
updated_at=datetime.now(UTC),
)
@staticmethod
def from_db(
prisma_session: PrismaChatSession,
prisma_messages: list[PrismaChatMessage] | None = None,
) -> "ChatSession":
"""Convert Prisma models to Pydantic ChatSession."""
messages = []
if prisma_messages:
for msg in prisma_messages:
messages.append(
ChatMessage(
role=msg.role,
content=msg.content,
name=msg.name,
tool_call_id=msg.toolCallId,
refusal=msg.refusal,
tool_calls=_parse_json_field(msg.toolCalls),
function_call=_parse_json_field(msg.functionCall),
)
)
# Parse JSON fields from Prisma
credentials = _parse_json_field(prisma_session.credentials, default={})
successful_agent_runs = _parse_json_field(
prisma_session.successfulAgentRuns, default={}
)
successful_agent_schedules = _parse_json_field(
prisma_session.successfulAgentSchedules, default={}
)
# Calculate usage from token counts
usage = []
if prisma_session.totalPromptTokens or prisma_session.totalCompletionTokens:
usage.append(
Usage(
prompt_tokens=prisma_session.totalPromptTokens or 0,
completion_tokens=prisma_session.totalCompletionTokens or 0,
total_tokens=(prisma_session.totalPromptTokens or 0)
+ (prisma_session.totalCompletionTokens or 0),
)
)
return ChatSession(
session_id=prisma_session.id,
user_id=prisma_session.userId,
title=prisma_session.title,
messages=messages,
usage=usage,
credentials=credentials,
started_at=prisma_session.createdAt,
updated_at=prisma_session.updatedAt,
successful_agent_runs=successful_agent_runs,
successful_agent_schedules=successful_agent_schedules,
)
    def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
        """Convert stored messages to OpenAI chat-completion message params."""
        messages = []
for message in self.messages:
if message.role == "developer":
m = ChatCompletionDeveloperMessageParam(
role="developer",
content=message.content or "",
)
if message.name:
m["name"] = message.name
messages.append(m)
elif message.role == "system":
m = ChatCompletionSystemMessageParam(
role="system",
content=message.content or "",
)
if message.name:
m["name"] = message.name
messages.append(m)
elif message.role == "user":
m = ChatCompletionUserMessageParam(
role="user",
content=message.content or "",
)
if message.name:
m["name"] = message.name
messages.append(m)
elif message.role == "assistant":
m = ChatCompletionAssistantMessageParam(
role="assistant",
content=message.content or "",
)
if message.function_call:
m["function_call"] = FunctionCall(
arguments=message.function_call["arguments"],
name=message.function_call["name"],
)
if message.refusal:
m["refusal"] = message.refusal
if message.tool_calls:
t: list[ChatCompletionMessageToolCallParam] = []
for tool_call in message.tool_calls:
# Tool calls are stored with nested structure: {id, type, function: {name, arguments}}
function_data = tool_call.get("function", {})
# Skip tool calls that are missing required fields
if "id" not in tool_call or "name" not in function_data:
logger.warning(
f"Skipping invalid tool call: missing required fields. "
f"Got: {tool_call.keys()}, function keys: {function_data.keys()}"
)
continue
# Arguments are stored as a JSON string
arguments_str = function_data.get("arguments", "{}")
t.append(
ChatCompletionMessageToolCallParam(
id=tool_call["id"],
type="function",
function=Function(
arguments=arguments_str,
name=function_data["name"],
),
)
)
m["tool_calls"] = t
if message.name:
m["name"] = message.name
messages.append(m)
elif message.role == "tool":
messages.append(
ChatCompletionToolMessageParam(
role="tool",
content=message.content or "",
tool_call_id=message.tool_call_id or "",
)
)
elif message.role == "function":
messages.append(
ChatCompletionFunctionMessageParam(
role="function",
content=message.content,
name=message.name or "",
)
)
return messages
async def _get_session_from_cache(session_id: str) -> ChatSession | None:
"""Get a chat session from Redis cache."""
redis_key = _get_session_cache_key(session_id)
async_redis = await get_redis_async()
raw_session: bytes | None = await async_redis.get(redis_key)
if raw_session is None:
return None
try:
session = ChatSession.model_validate_json(raw_session)
logger.info(
f"Loading session {session_id} from cache: "
f"message_count={len(session.messages)}, "
f"roles={[m.role for m in session.messages]}"
)
return session
except Exception as e:
logger.error(f"Failed to deserialize session {session_id}: {e}", exc_info=True)
raise RedisError(f"Corrupted session data for {session_id}") from e
async def _cache_session(session: ChatSession) -> None:
"""Cache a chat session in Redis."""
redis_key = _get_session_cache_key(session.session_id)
async_redis = await get_redis_async()
await async_redis.setex(redis_key, config.session_ttl, session.model_dump_json())
async def _get_session_from_db(session_id: str) -> ChatSession | None:
"""Get a chat session from the database."""
prisma_session = await chat_db.get_chat_session(session_id)
if not prisma_session:
return None
messages = prisma_session.Messages
logger.info(
f"Loading session {session_id} from DB: "
f"has_messages={messages is not None}, "
f"message_count={len(messages) if messages else 0}, "
f"roles={[m.role for m in messages] if messages else []}"
)
return ChatSession.from_db(prisma_session, messages)
async def _save_session_to_db(
session: ChatSession, existing_message_count: int
) -> None:
"""Save or update a chat session in the database."""
# Check if session exists in DB
existing = await chat_db.get_chat_session(session.session_id)
if not existing:
# Create new session
await chat_db.create_chat_session(
session_id=session.session_id,
user_id=session.user_id,
)
existing_message_count = 0
# Calculate total tokens from usage
total_prompt = sum(u.prompt_tokens for u in session.usage)
total_completion = sum(u.completion_tokens for u in session.usage)
# Update session metadata
await chat_db.update_chat_session(
session_id=session.session_id,
credentials=session.credentials,
successful_agent_runs=session.successful_agent_runs,
successful_agent_schedules=session.successful_agent_schedules,
total_prompt_tokens=total_prompt,
total_completion_tokens=total_completion,
)
# Add new messages (only those after existing count)
new_messages = session.messages[existing_message_count:]
if new_messages:
messages_data = []
for msg in new_messages:
messages_data.append(
{
"role": msg.role,
"content": msg.content,
"name": msg.name,
"tool_call_id": msg.tool_call_id,
"refusal": msg.refusal,
"tool_calls": msg.tool_calls,
"function_call": msg.function_call,
}
)
logger.info(
f"Saving {len(new_messages)} new messages to DB for session {session.session_id}: "
f"roles={[m['role'] for m in messages_data]}, "
f"start_sequence={existing_message_count}"
)
await chat_db.add_chat_messages_batch(
session_id=session.session_id,
messages=messages_data,
start_sequence=existing_message_count,
)
async def get_chat_session(
session_id: str,
user_id: str | None = None,
) -> ChatSession | None:
"""Get a chat session by ID.
Checks Redis cache first, falls back to database if not found.
Caches database results back to Redis.
Args:
session_id: The session ID to fetch.
user_id: If provided, validates that the session belongs to this user.
If None, ownership is not validated (admin/system access).
"""
# Try cache first
try:
session = await _get_session_from_cache(session_id)
if session:
# Verify user ownership if user_id was provided for validation
if user_id is not None and session.user_id != user_id:
logger.warning(
f"Session {session_id} user id mismatch: {session.user_id} != {user_id}"
)
return None
return session
except RedisError:
logger.warning(f"Cache error for session {session_id}, trying database")
except Exception as e:
logger.warning(f"Unexpected cache error for session {session_id}: {e}")
# Fall back to database
logger.info(f"Session {session_id} not in cache, checking database")
session = await _get_session_from_db(session_id)
if session is None:
logger.warning(f"Session {session_id} not found in cache or database")
return None
# Verify user ownership if user_id was provided for validation
if user_id is not None and session.user_id != user_id:
logger.warning(
f"Session {session_id} user id mismatch: {session.user_id} != {user_id}"
)
return None
# Cache the session from DB
try:
await _cache_session(session)
logger.info(f"Cached session {session_id} from database")
except Exception as e:
logger.warning(f"Failed to cache session {session_id}: {e}")
return session
async def upsert_chat_session(
session: ChatSession,
) -> ChatSession:
"""Update a chat session in both cache and database.
Uses session-level locking to prevent race conditions when concurrent
operations (e.g., background title update and main stream handler)
attempt to upsert the same session simultaneously.
Raises:
DatabaseError: If the database write fails. The cache is still updated
as a best-effort optimization, but the error is propagated to ensure
callers are aware of the persistence failure.
RedisError: If the cache write fails (after successful DB write).
"""
# Acquire session-specific lock to prevent concurrent upserts
lock = await _get_session_lock(session.session_id)
async with lock:
# Get existing message count from DB for incremental saves
existing_message_count = await chat_db.get_chat_session_message_count(
session.session_id
)
db_error: Exception | None = None
# Save to database (primary storage)
try:
await _save_session_to_db(session, existing_message_count)
except Exception as e:
logger.error(
f"Failed to save session {session.session_id} to database: {e}"
)
db_error = e
# Save to cache (best-effort, even if DB failed)
try:
await _cache_session(session)
except Exception as e:
# If DB succeeded but cache failed, raise cache error
if db_error is None:
raise RedisError(
f"Failed to persist chat session {session.session_id} to Redis: {e}"
) from e
# If both failed, log cache error but raise DB error (more critical)
logger.warning(
f"Cache write also failed for session {session.session_id}: {e}"
)
# Propagate DB error after attempting cache (prevents data loss)
if db_error is not None:
raise DatabaseError(
f"Failed to persist chat session {session.session_id} to database"
) from db_error
return session
async def create_chat_session(user_id: str) -> ChatSession:
"""Create a new chat session and persist it.
Raises:
DatabaseError: If the database write fails. We fail fast to ensure
callers never receive a non-persisted session that only exists
in cache (which would be lost when the cache expires).
"""
session = ChatSession.new(user_id)
# Create in database first - fail fast if this fails
try:
await chat_db.create_chat_session(
session_id=session.session_id,
user_id=user_id,
)
except Exception as e:
logger.error(f"Failed to create session {session.session_id} in database: {e}")
raise DatabaseError(
f"Failed to create chat session {session.session_id} in database"
) from e
# Cache the session (best-effort optimization, DB is source of truth)
try:
await _cache_session(session)
except Exception as e:
logger.warning(f"Failed to cache new session {session.session_id}: {e}")
return session
async def get_user_sessions(
user_id: str,
limit: int = 50,
offset: int = 0,
) -> tuple[list[ChatSession], int]:
"""Get chat sessions for a user from the database with total count.
Returns:
A tuple of (sessions, total_count) where total_count is the overall
number of sessions for the user (not just the current page).
"""
prisma_sessions = await chat_db.get_user_chat_sessions(user_id, limit, offset)
total_count = await chat_db.get_user_session_count(user_id)
sessions = []
for prisma_session in prisma_sessions:
# Convert without messages for listing (lighter weight)
sessions.append(ChatSession.from_db(prisma_session, None))
return sessions, total_count
async def delete_chat_session(session_id: str, user_id: str | None = None) -> bool:
"""Delete a chat session from both cache and database.
Args:
session_id: The session ID to delete.
user_id: If provided, validates that the session belongs to this user
before deletion. This prevents unauthorized deletion.
Returns:
True if deleted successfully, False otherwise.
"""
# Delete from database first (with optional user_id validation)
# This confirms ownership before invalidating cache
deleted = await chat_db.delete_chat_session(session_id, user_id)
if not deleted:
return False
# Only invalidate cache and clean up lock after DB confirms deletion
try:
redis_key = _get_session_cache_key(session_id)
async_redis = await get_redis_async()
await async_redis.delete(redis_key)
except Exception as e:
logger.warning(f"Failed to delete session {session_id} from cache: {e}")
# Clean up session lock (belt-and-suspenders with WeakValueDictionary)
async with _session_locks_mutex:
_session_locks.pop(session_id, None)
return True
async def update_session_title(session_id: str, title: str) -> bool:
"""Update only the title of a chat session.
This is a lightweight operation that doesn't touch messages, avoiding
race conditions with concurrent message updates. Use this for background
title generation instead of upsert_chat_session.
Args:
session_id: The session ID to update.
title: The new title to set.
Returns:
True if updated successfully, False otherwise.
"""
try:
result = await chat_db.update_chat_session(session_id=session_id, title=title)
if result is None:
logger.warning(f"Session {session_id} not found for title update")
return False
# Invalidate cache so next fetch gets updated title
try:
redis_key = _get_session_cache_key(session_id)
async_redis = await get_redis_async()
await async_redis.delete(redis_key)
except Exception as e:
logger.warning(f"Failed to invalidate cache for session {session_id}: {e}")
return True
except Exception as e:
logger.error(f"Failed to update title for session {session_id}: {e}")
return False
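# --- Illustrative sketch (not part of this module) ---
# The intended read-modify-write flow against the cache-aside store above:
# fetch (cache first, DB fallback), mutate in memory, then upsert, which
# persists to the DB and refreshes the Redis copy under the session lock.
async def _demo_append_user_message(session_id: str, user_id: str, text: str) -> None:
    session = await get_chat_session(session_id, user_id)
    if session is None:
        raise ValueError(f"Unknown session {session_id}")
    session.messages.append(ChatMessage(role="user", content=text))
    await upsert_chat_session(session)  # DB write is authoritative; cache is best-effort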

View File

@@ -1,119 +0,0 @@
import pytest
from .model import (
ChatMessage,
ChatSession,
Usage,
get_chat_session,
upsert_chat_session,
)
messages = [
ChatMessage(content="Hello, how are you?", role="user"),
ChatMessage(
content="I'm fine, thank you!",
role="assistant",
tool_calls=[
{
"id": "t123",
"type": "function",
"function": {
"name": "get_weather",
"arguments": '{"city": "New York"}',
},
}
],
),
ChatMessage(
content="I'm using the tool to get the weather",
role="tool",
tool_call_id="t123",
),
]
@pytest.mark.asyncio(loop_scope="session")
async def test_chatsession_serialization_deserialization():
s = ChatSession.new(user_id="abc123")
s.messages = messages
s.usage = [Usage(prompt_tokens=100, completion_tokens=200, total_tokens=300)]
serialized = s.model_dump_json()
s2 = ChatSession.model_validate_json(serialized)
assert s2.model_dump() == s.model_dump()
@pytest.mark.asyncio(loop_scope="session")
async def test_chatsession_redis_storage(setup_test_user, test_user_id):
s = ChatSession.new(user_id=test_user_id)
s.messages = messages
s = await upsert_chat_session(s)
s2 = await get_chat_session(
session_id=s.session_id,
user_id=s.user_id,
)
assert s2 == s
@pytest.mark.asyncio(loop_scope="session")
async def test_chatsession_redis_storage_user_id_mismatch(
setup_test_user, test_user_id
):
s = ChatSession.new(user_id=test_user_id)
s.messages = messages
s = await upsert_chat_session(s)
s2 = await get_chat_session(s.session_id, "different_user_id")
assert s2 is None
@pytest.mark.asyncio(loop_scope="session")
async def test_chatsession_db_storage(setup_test_user, test_user_id):
"""Test that messages are correctly saved to and loaded from DB (not cache)."""
from backend.data.redis_client import get_redis_async
# Create session with messages including assistant message
s = ChatSession.new(user_id=test_user_id)
s.messages = messages # Contains user, assistant, and tool messages
assert s.session_id is not None, "Session id is not set"
# Upsert to save to both cache and DB
s = await upsert_chat_session(s)
# Clear the Redis cache to force DB load
redis_key = f"chat:session:{s.session_id}"
async_redis = await get_redis_async()
await async_redis.delete(redis_key)
# Load from DB (cache was cleared)
s2 = await get_chat_session(
session_id=s.session_id,
user_id=s.user_id,
)
assert s2 is not None, "Session not found after loading from DB"
assert len(s2.messages) == len(
s.messages
), f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}"
# Verify all roles are present
roles = [m.role for m in s2.messages]
assert "user" in roles, f"User message missing. Roles found: {roles}"
assert "assistant" in roles, f"Assistant message missing. Roles found: {roles}"
assert "tool" in roles, f"Tool message missing. Roles found: {roles}"
# Verify message content
for orig, loaded in zip(s.messages, s2.messages):
assert orig.role == loaded.role, f"Role mismatch: {orig.role} != {loaded.role}"
assert (
orig.content == loaded.content
), f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}"
if orig.tool_calls:
assert (
loaded.tool_calls is not None
), f"Tool calls missing for {orig.role} message"
assert len(orig.tool_calls) == len(loaded.tool_calls)

View File

@@ -1,144 +0,0 @@
"""
Response models for Vercel AI SDK UI Stream Protocol.
This module implements the AI SDK UI Stream Protocol (v1) for streaming chat responses.
See: https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol
"""
from enum import Enum
from typing import Any
from pydantic import BaseModel, Field
class ResponseType(str, Enum):
"""Types of streaming responses following AI SDK protocol."""
# Message lifecycle
START = "start"
FINISH = "finish"
# Text streaming
TEXT_START = "text-start"
TEXT_DELTA = "text-delta"
TEXT_END = "text-end"
# Tool interaction
TOOL_INPUT_START = "tool-input-start"
TOOL_INPUT_AVAILABLE = "tool-input-available"
TOOL_OUTPUT_AVAILABLE = "tool-output-available"
# Other
ERROR = "error"
USAGE = "usage"
class StreamBaseResponse(BaseModel):
"""Base response model for all streaming responses."""
type: ResponseType
def to_sse(self) -> str:
"""Convert to SSE format."""
return f"data: {self.model_dump_json()}\n\n"
# ========== Message Lifecycle ==========
class StreamStart(StreamBaseResponse):
"""Start of a new message."""
type: ResponseType = ResponseType.START
messageId: str = Field(..., description="Unique message ID")
class StreamFinish(StreamBaseResponse):
"""End of message/stream."""
type: ResponseType = ResponseType.FINISH
# ========== Text Streaming ==========
class StreamTextStart(StreamBaseResponse):
"""Start of a text block."""
type: ResponseType = ResponseType.TEXT_START
id: str = Field(..., description="Text block ID")
class StreamTextDelta(StreamBaseResponse):
"""Streaming text content delta."""
type: ResponseType = ResponseType.TEXT_DELTA
id: str = Field(..., description="Text block ID")
delta: str = Field(..., description="Text content delta")
class StreamTextEnd(StreamBaseResponse):
"""End of a text block."""
type: ResponseType = ResponseType.TEXT_END
id: str = Field(..., description="Text block ID")
# ========== Tool Interaction ==========
class StreamToolInputStart(StreamBaseResponse):
"""Tool call started notification."""
type: ResponseType = ResponseType.TOOL_INPUT_START
toolCallId: str = Field(..., description="Unique tool call ID")
toolName: str = Field(..., description="Name of the tool being called")
class StreamToolInputAvailable(StreamBaseResponse):
"""Tool input is ready for execution."""
type: ResponseType = ResponseType.TOOL_INPUT_AVAILABLE
toolCallId: str = Field(..., description="Unique tool call ID")
toolName: str = Field(..., description="Name of the tool being called")
input: dict[str, Any] = Field(
default_factory=dict, description="Tool input arguments"
)
class StreamToolOutputAvailable(StreamBaseResponse):
"""Tool execution result."""
type: ResponseType = ResponseType.TOOL_OUTPUT_AVAILABLE
toolCallId: str = Field(..., description="Tool call ID this responds to")
output: str | dict[str, Any] = Field(..., description="Tool execution output")
# Additional fields for internal use (not part of AI SDK spec but useful)
toolName: str | None = Field(
default=None, description="Name of the tool that was executed"
)
success: bool = Field(
default=True, description="Whether the tool execution succeeded"
)
# ========== Other ==========
class StreamUsage(StreamBaseResponse):
"""Token usage statistics."""
type: ResponseType = ResponseType.USAGE
promptTokens: int = Field(..., description="Number of prompt tokens")
completionTokens: int = Field(..., description="Number of completion tokens")
totalTokens: int = Field(..., description="Total number of tokens")
class StreamError(StreamBaseResponse):
"""Error response."""
type: ResponseType = ResponseType.ERROR
errorText: str = Field(..., description="Error message text")
code: str | None = Field(default=None, description="Error code")
details: dict[str, Any] | None = Field(
default=None, description="Additional error details"
)
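# Illustrative note (not part of this module): what one event looks like on
# the wire. With Pydantic v2, str-valued enums serialize to their value, so a
# text delta renders roughly as:
#   StreamTextDelta(id="txt_1", delta="Hel").to_sse()
#   -> 'data: {"type":"text-delta","id":"txt_1","delta":"Hel"}\n\n'
# Clients implementing the AI SDK UI Stream Protocol parse each "data:" line
# as JSON and dispatch on the "type" field.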

View File

@@ -1,362 +0,0 @@
"""Chat API routes for chat session management and streaming via SSE."""
import logging
from collections.abc import AsyncGenerator
from typing import Annotated
from autogpt_libs import auth
from fastapi import APIRouter, Depends, Query, Security
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from backend.util.exceptions import NotFoundError
from . import service as chat_service
from .config import ChatConfig
from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions
config = ChatConfig()
logger = logging.getLogger(__name__)
async def _validate_and_get_session(
session_id: str,
user_id: str | None,
) -> ChatSession:
"""Validate session exists and belongs to user."""
session = await get_chat_session(session_id, user_id)
if not session:
raise NotFoundError(f"Session {session_id} not found.")
return session
router = APIRouter(
tags=["chat"],
)
# ========== Request/Response Models ==========
class StreamChatRequest(BaseModel):
"""Request model for streaming chat with optional context."""
message: str
is_user_message: bool = True
context: dict[str, str] | None = None # {url: str, content: str}
class CreateSessionResponse(BaseModel):
"""Response model containing information on a newly created chat session."""
id: str
created_at: str
user_id: str | None
class SessionDetailResponse(BaseModel):
"""Response model providing complete details for a chat session, including messages."""
id: str
created_at: str
updated_at: str
user_id: str | None
messages: list[dict]
class SessionSummaryResponse(BaseModel):
"""Response model for a session summary (without messages)."""
id: str
created_at: str
updated_at: str
title: str | None = None
class ListSessionsResponse(BaseModel):
"""Response model for listing chat sessions."""
sessions: list[SessionSummaryResponse]
total: int
# ========== Routes ==========
@router.get(
"/sessions",
dependencies=[Security(auth.requires_user)],
)
async def list_sessions(
user_id: Annotated[str, Security(auth.get_user_id)],
limit: int = Query(default=50, ge=1, le=100),
offset: int = Query(default=0, ge=0),
) -> ListSessionsResponse:
"""
List chat sessions for the authenticated user.
Returns a paginated list of chat sessions belonging to the current user,
ordered by most recently updated.
Args:
user_id: The authenticated user's ID.
limit: Maximum number of sessions to return (1-100).
offset: Number of sessions to skip for pagination.
Returns:
ListSessionsResponse: List of session summaries and total count.
"""
sessions, total_count = await get_user_sessions(user_id, limit, offset)
return ListSessionsResponse(
sessions=[
SessionSummaryResponse(
id=session.session_id,
created_at=session.started_at.isoformat(),
updated_at=session.updated_at.isoformat(),
title=session.title,
)
for session in sessions
],
total=total_count,
)
@router.post(
"/sessions",
)
async def create_session(
user_id: Annotated[str, Depends(auth.get_user_id)],
) -> CreateSessionResponse:
"""
Create a new chat session.
Initiates a new chat session for the authenticated user.
Args:
user_id: The authenticated user ID parsed from the JWT (required).
Returns:
CreateSessionResponse: Details of the created session.
"""
logger.info(
f"Creating session with user_id: "
f"...{user_id[-8:] if len(user_id) > 8 else '<redacted>'}"
)
session = await create_chat_session(user_id)
return CreateSessionResponse(
id=session.session_id,
created_at=session.started_at.isoformat(),
user_id=session.user_id,
)
@router.get(
"/sessions/{session_id}",
)
async def get_session(
session_id: str,
user_id: Annotated[str | None, Depends(auth.get_user_id)],
) -> SessionDetailResponse:
"""
Retrieve the details of a specific chat session.
Looks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.
Args:
session_id: The unique identifier for the desired chat session.
user_id: The optional authenticated user ID, or None for anonymous access.
Returns:
SessionDetailResponse: Details for the requested session.
Raises:
NotFoundError: If the session does not exist.
"""
session = await get_chat_session(session_id, user_id)
if not session:
raise NotFoundError(f"Session {session_id} not found")
messages = [message.model_dump() for message in session.messages]
logger.info(
f"Returning session {session_id}: "
f"message_count={len(messages)}, "
f"roles={[m.get('role') for m in messages]}"
)
return SessionDetailResponse(
id=session.session_id,
created_at=session.started_at.isoformat(),
updated_at=session.updated_at.isoformat(),
user_id=session.user_id or None,
messages=messages,
)
@router.post(
"/sessions/{session_id}/stream",
)
async def stream_chat_post(
session_id: str,
request: StreamChatRequest,
user_id: str | None = Depends(auth.get_user_id),
):
"""
Stream chat responses for a session (POST with context support).
Streams the AI/completion responses in real time over Server-Sent Events (SSE), including:
- Text fragments as they are generated
- Tool call UI elements (if invoked)
- Tool execution results
Args:
session_id: The chat session identifier to associate with the streamed messages.
request: Request body containing message, is_user_message, and optional context.
user_id: Optional authenticated user ID.
Returns:
StreamingResponse: SSE-formatted response chunks.
"""
session = await _validate_and_get_session(session_id, user_id)
async def event_generator() -> AsyncGenerator[str, None]:
async for chunk in chat_service.stream_chat_completion(
session_id,
request.message,
is_user_message=request.is_user_message,
user_id=user_id,
session=session, # Pass pre-fetched session to avoid double-fetch
context=request.context,
):
yield chunk.to_sse()
# AI SDK protocol termination
yield "data: [DONE]\n\n"
return StreamingResponse(
event_generator(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Disable nginx buffering
"x-vercel-ai-ui-message-stream": "v1", # AI SDK protocol header
},
)
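
For reference, a hedged client-side sketch of consuming this SSE endpoint. Assumptions not confirmed by this diff: the base URL, the router mount path, and bearer-token auth.

import httpx

async def consume_stream(base_url: str, session_id: str, token: str) -> None:
    # POST the message, then read SSE frames line by line.
    payload = {"message": "Hello", "is_user_message": True}
    headers = {"Authorization": f"Bearer {token}"}
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "POST",
            f"{base_url}/sessions/{session_id}/stream",
            json=payload,
            headers=headers,
        ) as response:
            async for line in response.aiter_lines():
                if not line.startswith("data: "):
                    continue  # skip blank separator lines between frames
                data = line[len("data: "):]
                if data == "[DONE]":  # AI SDK protocol terminator
                    break
                print(data)  # one JSON-encoded stream event per frame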
@router.get(
"/sessions/{session_id}/stream",
)
async def stream_chat_get(
session_id: str,
message: Annotated[str, Query(min_length=1, max_length=10000)],
user_id: str | None = Depends(auth.get_user_id),
is_user_message: bool = Query(default=True),
):
"""
Stream chat responses for a session (GET - legacy endpoint).
Streams the AI/completion responses in real time over Server-Sent Events (SSE), including:
- Text fragments as they are generated
- Tool call UI elements (if invoked)
- Tool execution results
Args:
session_id: The chat session identifier to associate with the streamed messages.
message: The user's new message to process.
user_id: Optional authenticated user ID.
is_user_message: Whether the message is a user message.
Returns:
StreamingResponse: SSE-formatted response chunks.
"""
session = await _validate_and_get_session(session_id, user_id)
async def event_generator() -> AsyncGenerator[str, None]:
async for chunk in chat_service.stream_chat_completion(
session_id,
message,
is_user_message=is_user_message,
user_id=user_id,
session=session, # Pass pre-fetched session to avoid double-fetch
):
yield chunk.to_sse()
# AI SDK protocol termination
yield "data: [DONE]\n\n"
return StreamingResponse(
event_generator(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Disable nginx buffering
"x-vercel-ai-ui-message-stream": "v1", # AI SDK protocol header
},
)
@router.patch(
"/sessions/{session_id}/assign-user",
dependencies=[Security(auth.requires_user)],
status_code=200,
)
async def session_assign_user(
session_id: str,
user_id: Annotated[str, Security(auth.get_user_id)],
) -> dict:
"""
Assign an authenticated user to a chat session.
Used (typically post-login) to claim an existing anonymous session as the current authenticated user.
Args:
session_id: The identifier for the (previously anonymous) session.
user_id: The authenticated user's ID to associate with the session.
Returns:
dict: Status of the assignment.
"""
await chat_service.assign_user_to_session(session_id, user_id)
return {"status": "ok"}
# ========== Health Check ==========
@router.get("/health", status_code=200)
async def health_check() -> dict:
"""
Health check endpoint for the chat service.
Performs a full cycle test of session creation and retrieval. Should always return healthy
if the service and data layer are operational.
Returns:
dict: A status dictionary indicating health, service name, and API version.
"""
from backend.data.user import get_or_create_user
# Ensure health check user exists (required for FK constraint)
health_check_user_id = "health-check-user"
await get_or_create_user(
{
"sub": health_check_user_id,
"email": "health-check@system.local",
"user_metadata": {"name": "Health Check User"},
}
)
# Create and retrieve session to verify full data layer
session = await create_chat_session(health_check_user_id)
await get_chat_session(session.session_id, health_check_user_id)
return {
"status": "healthy",
"service": "chat",
"version": "0.1.0",
}

View File

@@ -1,904 +0,0 @@
import asyncio
import logging
from collections.abc import AsyncGenerator
from typing import Any
import orjson
from langfuse import Langfuse
from openai import (
APIConnectionError,
APIError,
APIStatusError,
AsyncOpenAI,
RateLimitError,
)
from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam
from backend.data.understanding import (
format_understanding_for_prompt,
get_business_understanding,
)
from backend.util.exceptions import NotFoundError
from backend.util.settings import Settings
from . import db as chat_db
from .config import ChatConfig
from .model import (
ChatMessage,
ChatSession,
Usage,
get_chat_session,
update_session_title,
upsert_chat_session,
)
from .response_model import (
StreamBaseResponse,
StreamError,
StreamFinish,
StreamStart,
StreamTextDelta,
StreamTextEnd,
StreamTextStart,
StreamToolInputAvailable,
StreamToolInputStart,
StreamToolOutputAvailable,
StreamUsage,
)
from .tools import execute_tool, tools
logger = logging.getLogger(__name__)
config = ChatConfig()
settings = Settings()
client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
# Langfuse client (lazy initialization)
_langfuse_client: Langfuse | None = None
class LangfuseNotConfiguredError(Exception):
"""Raised when Langfuse is required but not configured."""
pass
def _is_langfuse_configured() -> bool:
"""Check if Langfuse credentials are configured."""
return bool(
settings.secrets.langfuse_public_key and settings.secrets.langfuse_secret_key
)
def _get_langfuse_client() -> Langfuse:
"""Get or create the Langfuse client for prompt management and tracing."""
global _langfuse_client
if _langfuse_client is None:
if not _is_langfuse_configured():
raise LangfuseNotConfiguredError(
"Langfuse is not configured. The chat feature requires Langfuse for prompt management. "
"Please set the LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables."
)
_langfuse_client = Langfuse(
public_key=settings.secrets.langfuse_public_key,
secret_key=settings.secrets.langfuse_secret_key,
host=settings.secrets.langfuse_host or "https://cloud.langfuse.com",
)
return _langfuse_client
def _get_environment() -> str:
"""Get the current environment name for Langfuse tagging."""
return settings.config.app_env.value
def _get_langfuse_prompt() -> str:
"""Fetch the latest production prompt from Langfuse.
Returns:
The compiled prompt text from Langfuse.
Raises:
Exception: If Langfuse is unavailable or prompt fetch fails.
"""
try:
langfuse = _get_langfuse_client()
# cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0)
compiled = prompt.compile()
logger.info(
f"Fetched prompt '{config.langfuse_prompt_name}' from Langfuse "
f"(version: {prompt.version})"
)
return compiled
except Exception as e:
logger.error(f"Failed to fetch prompt from Langfuse: {e}")
raise
async def _is_first_session(user_id: str) -> bool:
"""Check if this is the user's first chat session.
Returns True if the user has 1 or fewer sessions (meaning this is their first).
"""
try:
session_count = await chat_db.get_user_session_count(user_id)
return session_count <= 1
except Exception as e:
logger.warning(f"Failed to check session count for user {user_id}: {e}")
return False # Default to non-onboarding if we can't check
async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
"""Build the full system prompt including business understanding if available.
Args:
user_id: The user ID for fetching business understanding
If "default" and this is the user's first session, will use "onboarding" instead.
Returns:
Tuple of (compiled prompt string, Langfuse prompt object for tracing)
"""
langfuse = _get_langfuse_client()
# cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0)
# If user is authenticated, try to fetch their business understanding
understanding = None
if user_id:
try:
understanding = await get_business_understanding(user_id)
except Exception as e:
logger.warning(f"Failed to fetch business understanding: {e}")
understanding = None
if understanding:
context = format_understanding_for_prompt(understanding)
else:
context = "This is the first time you are meeting the user. Greet them and introduce them to the platform"
compiled = prompt.compile(users_information=context)
return compiled, prompt
async def _generate_session_title(message: str) -> str | None:
"""Generate a concise title for a chat session based on the first message.
Args:
message: The first user message in the session
Returns:
A short title (3-6 words) or None if generation fails
"""
try:
response = await client.chat.completions.create(
model=config.title_model,
messages=[
{
"role": "system",
"content": (
"Generate a very short title (3-6 words) for a chat conversation "
"based on the user's first message. The title should capture the "
"main topic or intent. Return ONLY the title, no quotes or punctuation."
),
},
{"role": "user", "content": message[:500]}, # Limit input length
],
max_tokens=20,
)
title = response.choices[0].message.content
if title:
# Clean up the title
title = title.strip().strip("\"'")
# Limit length
if len(title) > 50:
title = title[:47] + "..."
return title
return None
except Exception as e:
logger.warning(f"Failed to generate session title: {e}")
return None
async def assign_user_to_session(
session_id: str,
user_id: str,
) -> ChatSession:
"""
Assign a user to a chat session.
"""
session = await get_chat_session(session_id, None)
if not session:
raise NotFoundError(f"Session {session_id} not found")
session.user_id = user_id
return await upsert_chat_session(session)
async def stream_chat_completion(
session_id: str,
message: str | None = None,
is_user_message: bool = True,
user_id: str | None = None,
retry_count: int = 0,
session: ChatSession | None = None,
context: dict[str, str] | None = None, # {url: str, content: str}
) -> AsyncGenerator[StreamBaseResponse, None]:
"""Main entry point for streaming chat completions with database handling.
This function handles all database operations and delegates streaming
to the internal _stream_chat_chunks function.
Args:
session_id: Chat session ID
message: New message to append to the conversation, if any
is_user_message: Whether the message is from the user (True) or the assistant
user_id: User ID for authentication (None for anonymous)
retry_count: Current retry attempt for retryable streaming errors
session: Optional pre-loaded session object (for recursive calls to avoid Redis refetch)
context: Optional page context dict with "url" and "content" keys
Yields:
StreamBaseResponse objects formatted as SSE
Raises:
NotFoundError: If session_id is invalid
ValueError: If max_context_messages is exceeded
"""
logger.info(
f"Streaming chat completion for session {session_id} for message {message} and user id {user_id}. Message is user message: {is_user_message}"
)
# Check if Langfuse is configured - required for chat functionality
if not _is_langfuse_configured():
logger.error("Chat request failed: Langfuse is not configured")
yield StreamError(
errorText="Chat service is not available. Langfuse must be configured "
"with LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables."
)
yield StreamFinish()
return
# Langfuse observations will be created after session is loaded (need messages for input)
# Initialize to None so finally block can safely check and end them
trace = None
generation = None
# Only fetch from Redis if session not provided (initial call)
if session is None:
session = await get_chat_session(session_id, user_id)
logger.info(
f"Fetched session from Redis: {session.session_id if session else 'None'}, "
f"message_count={len(session.messages) if session else 0}"
)
else:
logger.info(
f"Using provided session object: {session.session_id}, "
f"message_count={len(session.messages)}"
)
if not session:
raise NotFoundError(
f"Session {session_id} not found. Please create a new session first."
)
if message:
# Build message content with context if provided
message_content = message
if context and context.get("url") and context.get("content"):
context_text = f"Page URL: {context['url']}\n\nPage Content:\n{context['content']}\n\n---\n\nUser Message: {message}"
message_content = context_text
logger.info(
f"Including page context: URL={context['url']}, content_length={len(context['content'])}"
)
session.messages.append(
ChatMessage(
role="user" if is_user_message else "assistant", content=message_content
)
)
logger.info(
f"Appended message (role={'user' if is_user_message else 'assistant'}), "
f"new message_count={len(session.messages)}"
)
logger.info(
f"Upserting session: {session.session_id} with user id {session.user_id}, "
f"message_count={len(session.messages)}"
)
session = await upsert_chat_session(session)
assert session, "Session not found"
# Generate title for new sessions on first user message (non-blocking)
# Check: is_user_message, no title yet, and this is the first user message
if is_user_message and message and not session.title:
user_messages = [m for m in session.messages if m.role == "user"]
if len(user_messages) == 1:
# First user message - generate title in background
# Capture only the values we need (not the session object) to avoid
# stale data issues when the main flow modifies the session
captured_session_id = session_id
captured_message = message
async def _update_title():
try:
title = await _generate_session_title(captured_message)
if title:
# Use dedicated title update function that doesn't
# touch messages, avoiding race conditions
await update_session_title(captured_session_id, title)
logger.info(
f"Generated title for session {captured_session_id}: {title}"
)
except Exception as e:
logger.warning(f"Failed to update session title: {e}")
# Fire and forget - don't block the chat response
asyncio.create_task(_update_title())
# Build system prompt with business understanding
system_prompt, langfuse_prompt = await _build_system_prompt(user_id)
# Build input messages including system prompt for complete Langfuse logging
trace_input_messages = [{"role": "system", "content": system_prompt}] + [
m.model_dump() for m in session.messages
]
# Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id)
# Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes
try:
langfuse = _get_langfuse_client()
env = _get_environment()
trace = langfuse.start_observation(
name="chat_completion",
input={"messages": trace_input_messages},
metadata={
"environment": env,
"model": config.model,
"message_count": len(session.messages),
"prompt_name": langfuse_prompt.name if langfuse_prompt else None,
"prompt_version": langfuse_prompt.version if langfuse_prompt else None,
},
)
# Set trace-level attributes (session_id, user_id, tags)
trace.update_trace(
session_id=session_id,
user_id=user_id,
tags=[env, "copilot"],
)
except Exception as e:
logger.warning(f"Failed to create Langfuse trace: {e}")
# Initialize variables that will be used in finally block (must be defined before try)
assistant_response = ChatMessage(
role="assistant",
content="",
)
accumulated_tool_calls: list[dict[str, Any]] = []
# Wrap main logic in try/finally to ensure Langfuse observations are always ended
try:
has_yielded_end = False
has_yielded_error = False
has_done_tool_call = False
has_received_text = False
text_streaming_ended = False
tool_response_messages: list[ChatMessage] = []
should_retry = False
# Generate unique IDs for AI SDK protocol
import uuid as uuid_module
message_id = str(uuid_module.uuid4())
text_block_id = str(uuid_module.uuid4())
# Yield message start
yield StreamStart(messageId=message_id)
# Create Langfuse generation for each LLM call, linked to the prompt
# Using v3 SDK: start_observation with as_type="generation"
generation = (
trace.start_observation(
as_type="generation",
name="llm_call",
model=config.model,
input={"messages": trace_input_messages},
prompt=langfuse_prompt,
)
if trace
else None
)
try:
async for chunk in _stream_chat_chunks(
session=session,
tools=tools,
system_prompt=system_prompt,
text_block_id=text_block_id,
):
if isinstance(chunk, StreamTextStart):
# Emit text-start before first text delta
if not has_received_text:
yield chunk
elif isinstance(chunk, StreamTextDelta):
delta = chunk.delta or ""
assert assistant_response.content is not None
assistant_response.content += delta
has_received_text = True
yield chunk
elif isinstance(chunk, StreamTextEnd):
# Emit text-end after text completes
if has_received_text and not text_streaming_ended:
text_streaming_ended = True
yield chunk
elif isinstance(chunk, StreamToolInputStart):
# Emit text-end before first tool call, but only if we've received text
if has_received_text and not text_streaming_ended:
yield StreamTextEnd(id=text_block_id)
text_streaming_ended = True
yield chunk
elif isinstance(chunk, StreamToolInputAvailable):
# Accumulate tool calls in OpenAI format
accumulated_tool_calls.append(
{
"id": chunk.toolCallId,
"type": "function",
"function": {
"name": chunk.toolName,
"arguments": orjson.dumps(chunk.input).decode("utf-8"),
},
}
)
elif isinstance(chunk, StreamToolOutputAvailable):
result_content = (
chunk.output
if isinstance(chunk.output, str)
else orjson.dumps(chunk.output).decode("utf-8")
)
tool_response_messages.append(
ChatMessage(
role="tool",
content=result_content,
tool_call_id=chunk.toolCallId,
)
)
has_done_tool_call = True
# Track if any tool execution failed
if not chunk.success:
logger.warning(
f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed"
)
yield chunk
elif isinstance(chunk, StreamFinish):
if not has_done_tool_call:
# Emit text-end before finish if we received text but haven't closed it
if has_received_text and not text_streaming_ended:
yield StreamTextEnd(id=text_block_id)
text_streaming_ended = True
has_yielded_end = True
yield chunk
elif isinstance(chunk, StreamError):
has_yielded_error = True
elif isinstance(chunk, StreamUsage):
session.usage.append(
Usage(
prompt_tokens=chunk.promptTokens,
completion_tokens=chunk.completionTokens,
total_tokens=chunk.totalTokens,
)
)
else:
logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
except Exception as e:
logger.error(f"Error during stream: {e!s}", exc_info=True)
# Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.)
is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError))
if is_retryable and retry_count < config.max_retries:
logger.info(
f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
)
should_retry = True
else:
# Non-retryable error or max retries exceeded
# Save any partial progress before reporting error
messages_to_save: list[ChatMessage] = []
# Add assistant message if it has content or tool calls
if accumulated_tool_calls:
assistant_response.tool_calls = accumulated_tool_calls
if assistant_response.content or assistant_response.tool_calls:
messages_to_save.append(assistant_response)
# Add tool response messages after assistant message
messages_to_save.extend(tool_response_messages)
session.messages.extend(messages_to_save)
await upsert_chat_session(session)
if not has_yielded_error:
error_message = str(e)
if not is_retryable:
error_message = f"Non-retryable error: {error_message}"
elif retry_count >= config.max_retries:
error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}"
error_response = StreamError(errorText=error_message)
yield error_response
if not has_yielded_end:
yield StreamFinish()
return
# Handle retry outside of exception handler to avoid nesting
if should_retry and retry_count < config.max_retries:
logger.info(
f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}"
)
async for chunk in stream_chat_completion(
session_id=session.session_id,
user_id=user_id,
retry_count=retry_count + 1,
session=session,
context=context,
):
yield chunk
return # Exit after retry to avoid double-saving in finally block
# Normal completion path - save session and handle tool call continuation
logger.info(
f"Normal completion path: session={session.session_id}, "
f"current message_count={len(session.messages)}"
)
# Build the messages list in the correct order
messages_to_save: list[ChatMessage] = []
# Add assistant message with tool_calls if any
if accumulated_tool_calls:
assistant_response.tool_calls = accumulated_tool_calls
logger.info(
f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
)
if assistant_response.content or assistant_response.tool_calls:
messages_to_save.append(assistant_response)
logger.info(
f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}"
)
# Add tool response messages after assistant message
messages_to_save.extend(tool_response_messages)
logger.info(
f"Saving {len(tool_response_messages)} tool response messages, "
f"total_to_save={len(messages_to_save)}"
)
session.messages.extend(messages_to_save)
logger.info(
f"Extended session messages, new message_count={len(session.messages)}"
)
await upsert_chat_session(session)
# If we did a tool call, stream the chat completion again to get the next response
if has_done_tool_call:
logger.info(
"Tool call executed, streaming chat completion again to get assistant response"
)
async for chunk in stream_chat_completion(
session_id=session.session_id,
user_id=user_id,
session=session, # Pass session object to avoid Redis refetch
context=context,
):
yield chunk
finally:
# Always end Langfuse observations to prevent resource leaks
# Guard against None and catch errors to avoid masking original exceptions
if generation is not None:
try:
latest_usage = session.usage[-1] if session.usage else None
generation.update(
model=config.model,
output={
"content": assistant_response.content,
"tool_calls": accumulated_tool_calls or None,
},
usage_details=(
{
"input": latest_usage.prompt_tokens,
"output": latest_usage.completion_tokens,
"total": latest_usage.total_tokens,
}
if latest_usage
else None
),
)
generation.end()
except Exception as e:
logger.warning(f"Failed to end Langfuse generation: {e}")
if trace is not None:
try:
if accumulated_tool_calls:
trace.update_trace(output={"tool_calls": accumulated_tool_calls})
else:
trace.update_trace(output={"response": assistant_response.content})
trace.end()
except Exception as e:
logger.warning(f"Failed to end Langfuse trace: {e}")
# Retry configuration for OpenAI API calls
MAX_RETRIES = 3
BASE_DELAY_SECONDS = 1.0
MAX_DELAY_SECONDS = 30.0
def _is_retryable_error(error: Exception) -> bool:
"""Determine if an error is retryable."""
if isinstance(error, RateLimitError):
return True
if isinstance(error, APIConnectionError):
return True
if isinstance(error, APIStatusError):
# APIStatusError has a response with status_code
# Retry on 5xx status codes (server errors)
if error.response.status_code >= 500:
return True
if isinstance(error, APIError):
# Retry on overloaded errors or 500 errors (may not have status code)
error_message = str(error).lower()
if "overloaded" in error_message or "internal server error" in error_message:
return True
return False
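
The retry loop in _stream_chat_chunks below pairs this predicate with capped exponential backoff. A quick sketch of the delay schedule the formula produces with the constants above:

# min(BASE_DELAY_SECONDS * 2**(attempt - 1), MAX_DELAY_SECONDS)
for attempt in range(1, MAX_RETRIES + 1):
    delay = min(BASE_DELAY_SECONDS * (2 ** (attempt - 1)), MAX_DELAY_SECONDS)
    print(f"attempt {attempt}: sleep {delay:.1f}s")
# With MAX_RETRIES=3: attempt 1 -> 1.0s, attempt 2 -> 2.0s, attempt 3 -> 4.0s.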
async def _stream_chat_chunks(
session: ChatSession,
tools: list[ChatCompletionToolParam],
system_prompt: str | None = None,
text_block_id: str | None = None,
) -> AsyncGenerator[StreamBaseResponse, None]:
"""
Pure streaming function for OpenAI chat completions with tool calling.
This function is database-agnostic and focuses only on streaming logic.
Implements exponential backoff retry for transient API errors.
Args:
session: Chat session with conversation history
tools: Available tools for the model
system_prompt: System prompt to prepend to messages
text_block_id: ID used for text start/delta/end events
Yields:
StreamBaseResponse objects (serialized to SSE by the caller)
"""
model = config.model
logger.info("Starting pure chat stream")
# Build messages with system prompt prepended
messages = session.to_openai_messages()
if system_prompt:
from openai.types.chat import ChatCompletionSystemMessageParam
system_message = ChatCompletionSystemMessageParam(
role="system",
content=system_prompt,
)
messages = [system_message] + messages
# Loop to handle tool calls and continue conversation
while True:
retry_count = 0
last_error: Exception | None = None
while retry_count <= MAX_RETRIES:
try:
logger.info(
f"Creating OpenAI chat completion stream..."
f"{f' (retry {retry_count}/{MAX_RETRIES})' if retry_count > 0 else ''}"
)
# Create the stream with proper types
stream = await client.chat.completions.create(
model=model,
messages=messages,
tools=tools,
tool_choice="auto",
stream=True,
stream_options={"include_usage": True},
)
# Variables to accumulate tool calls
tool_calls: list[dict[str, Any]] = []
active_tool_call_idx: int | None = None
finish_reason: str | None = None
# Track which tool call indices have had their start event emitted
emitted_start_for_idx: set[int] = set()
# Track if we've started the text block
text_started = False
# Process the stream
chunk: ChatCompletionChunk
async for chunk in stream:
if chunk.usage:
yield StreamUsage(
promptTokens=chunk.usage.prompt_tokens,
completionTokens=chunk.usage.completion_tokens,
totalTokens=chunk.usage.total_tokens,
)
if chunk.choices:
choice = chunk.choices[0]
delta = choice.delta
# Capture finish reason
if choice.finish_reason:
finish_reason = choice.finish_reason
logger.info(f"Finish reason: {finish_reason}")
# Handle content streaming
if delta.content:
# Emit text-start on first text content
if not text_started and text_block_id:
yield StreamTextStart(id=text_block_id)
text_started = True
# Stream the text delta
text_response = StreamTextDelta(
id=text_block_id or "",
delta=delta.content,
)
yield text_response
# Handle tool calls
if delta.tool_calls:
for tc_chunk in delta.tool_calls:
idx = tc_chunk.index
# Update active tool call index if needed
if (
active_tool_call_idx is None
or active_tool_call_idx != idx
):
active_tool_call_idx = idx
# Ensure we have a tool call object at this index
while len(tool_calls) <= idx:
tool_calls.append(
{
"id": "",
"type": "function",
"function": {
"name": "",
"arguments": "",
},
},
)
# Accumulate the tool call data
if tc_chunk.id:
tool_calls[idx]["id"] = tc_chunk.id
if tc_chunk.function:
if tc_chunk.function.name:
tool_calls[idx]["function"][
"name"
] = tc_chunk.function.name
if tc_chunk.function.arguments:
tool_calls[idx]["function"][
"arguments"
] += tc_chunk.function.arguments
# Emit StreamToolInputStart only after we have the tool call ID
if (
idx not in emitted_start_for_idx
and tool_calls[idx]["id"]
and tool_calls[idx]["function"]["name"]
):
yield StreamToolInputStart(
toolCallId=tool_calls[idx]["id"],
toolName=tool_calls[idx]["function"]["name"],
)
emitted_start_for_idx.add(idx)
logger.info(f"Stream complete. Finish reason: {finish_reason}")
# Yield all accumulated tool calls after the stream is complete
# This ensures all tool call arguments have been fully received
for idx, tool_call in enumerate(tool_calls):
try:
async for tc in _yield_tool_call(tool_calls, idx, session):
yield tc
except (orjson.JSONDecodeError, KeyError, TypeError) as e:
logger.error(
f"Failed to parse tool call {idx}: {e}",
exc_info=True,
extra={"tool_call": tool_call},
)
yield StreamError(
errorText=f"Invalid tool call arguments for tool {tool_call.get('function', {}).get('name', 'unknown')}: {e}",
)
# Re-raise to trigger retry logic in the parent function
raise
yield StreamFinish()
return
except Exception as e:
last_error = e
if _is_retryable_error(e) and retry_count < MAX_RETRIES:
retry_count += 1
# Calculate delay with exponential backoff
delay = min(
BASE_DELAY_SECONDS * (2 ** (retry_count - 1)),
MAX_DELAY_SECONDS,
)
logger.warning(
f"Retryable error in stream: {e!s}. "
f"Retrying in {delay:.1f}s (attempt {retry_count}/{MAX_RETRIES})"
)
await asyncio.sleep(delay)
continue # Retry the stream
else:
# Non-retryable error or max retries exceeded
logger.error(
f"Error in stream (not retrying): {e!s}",
exc_info=True,
)
error_response = StreamError(errorText=str(e))
yield error_response
yield StreamFinish()
return
# If we exit the retry loop without returning, it means we exhausted retries
if last_error:
logger.error(
f"Max retries ({MAX_RETRIES}) exceeded. Last error: {last_error!s}",
exc_info=True,
)
yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
yield StreamFinish()
return
async def _yield_tool_call(
tool_calls: list[dict[str, Any]],
yield_idx: int,
session: ChatSession,
) -> AsyncGenerator[StreamBaseResponse, None]:
"""
Yield a tool call and its execution result.
Raises:
orjson.JSONDecodeError: If tool call arguments cannot be parsed as JSON
KeyError: If expected tool call fields are missing
TypeError: If tool call structure is invalid
"""
tool_name = tool_calls[yield_idx]["function"]["name"]
tool_call_id = tool_calls[yield_idx]["id"]
logger.info(f"Yielding tool call: {tool_calls[yield_idx]}")
# Parse tool call arguments - handle empty arguments gracefully
raw_arguments = tool_calls[yield_idx]["function"]["arguments"]
if raw_arguments:
arguments = orjson.loads(raw_arguments)
else:
arguments = {}
yield StreamToolInputAvailable(
toolCallId=tool_call_id,
toolName=tool_name,
input=arguments,
)
tool_execution_response: StreamToolOutputAvailable = await execute_tool(
tool_name=tool_name,
parameters=arguments,
tool_call_id=tool_call_id,
user_id=session.user_id,
session=session,
)
logger.info(f"Yielding Tool execution response: {tool_execution_response}")
yield tool_execution_response

View File

@@ -1,59 +0,0 @@
from typing import TYPE_CHECKING, Any
from openai.types.chat import ChatCompletionToolParam
from backend.api.features.chat.model import ChatSession
from .add_understanding import AddUnderstandingTool
from .agent_output import AgentOutputTool
from .base import BaseTool
from .create_agent import CreateAgentTool
from .edit_agent import EditAgentTool
from .find_agent import FindAgentTool
from .find_block import FindBlockTool
from .find_library_agent import FindLibraryAgentTool
from .get_doc_page import GetDocPageTool
from .run_agent import RunAgentTool
from .run_block import RunBlockTool
from .search_docs import SearchDocsTool
if TYPE_CHECKING:
from backend.api.features.chat.response_model import StreamToolOutputAvailable
# Single source of truth for all tools
TOOL_REGISTRY: dict[str, BaseTool] = {
"add_understanding": AddUnderstandingTool(),
"create_agent": CreateAgentTool(),
"edit_agent": EditAgentTool(),
"find_agent": FindAgentTool(),
"find_block": FindBlockTool(),
"find_library_agent": FindLibraryAgentTool(),
"run_agent": RunAgentTool(),
"run_block": RunBlockTool(),
"agent_output": AgentOutputTool(),
"search_docs": SearchDocsTool(),
"get_doc_page": GetDocPageTool(),
}
# Export individual tool instances for backwards compatibility
find_agent_tool = TOOL_REGISTRY["find_agent"]
run_agent_tool = TOOL_REGISTRY["run_agent"]
# Generated from registry for OpenAI API
tools: list[ChatCompletionToolParam] = [
tool.as_openai_tool() for tool in TOOL_REGISTRY.values()
]
async def execute_tool(
tool_name: str,
parameters: dict[str, Any],
user_id: str | None,
session: ChatSession,
tool_call_id: str,
) -> "StreamToolOutputAvailable":
"""Execute a tool by name."""
tool = TOOL_REGISTRY.get(tool_name)
if not tool:
raise ValueError(f"Tool {tool_name} not found")
return await tool.execute(user_id, session, tool_call_id, **parameters)
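
A minimal usage sketch for the registry dispatcher above. The arguments and the pre-existing ChatSession instance are illustrative, not taken from this diff; "find_block" is one of the registered tool names.

async def demo(session: ChatSession) -> None:
    result = await execute_tool(
        tool_name="find_block",              # must be a key in TOOL_REGISTRY
        parameters={"query": "send email"},  # hypothetical tool arguments
        user_id="user-123",
        session=session,
        tool_call_id="call-abc",
    )
    print(result.output)  # StreamToolOutputAvailable carrying the result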

View File

@@ -1,119 +0,0 @@
"""Tool for capturing user business understanding incrementally."""
import logging
from typing import Any
from backend.api.features.chat.model import ChatSession
from backend.data.understanding import (
BusinessUnderstandingInput,
upsert_business_understanding,
)
from .base import BaseTool
from .models import ErrorResponse, ToolResponseBase, UnderstandingUpdatedResponse
logger = logging.getLogger(__name__)
class AddUnderstandingTool(BaseTool):
"""Tool for capturing user's business understanding incrementally."""
@property
def name(self) -> str:
return "add_understanding"
@property
def description(self) -> str:
return """Capture and store information about the user's business context,
workflows, pain points, and automation goals. Call this tool whenever the user
shares information about their business. Each call incrementally adds to the
existing understanding - you don't need to provide all fields at once.
Use this to build a comprehensive profile that helps recommend better agents
and automations for the user's specific needs."""
@property
def parameters(self) -> dict[str, Any]:
# Auto-generate from Pydantic model schema
schema = BusinessUnderstandingInput.model_json_schema()
properties = {}
for field_name, field_schema in schema.get("properties", {}).items():
prop: dict[str, Any] = {"description": field_schema.get("description", "")}
# Handle anyOf for Optional types
if "anyOf" in field_schema:
for option in field_schema["anyOf"]:
if option.get("type") != "null":
prop["type"] = option.get("type", "string")
if "items" in option:
prop["items"] = option["items"]
break
else:
prop["type"] = field_schema.get("type", "string")
if "items" in field_schema:
prop["items"] = field_schema["items"]
properties[field_name] = prop
return {"type": "object", "properties": properties, "required": []}
@property
def requires_auth(self) -> bool:
"""Requires authentication to store user-specific data."""
return True
async def _execute(
self,
user_id: str | None,
session: ChatSession,
**kwargs,
) -> ToolResponseBase:
"""
Capture and store business understanding incrementally.
Each call merges new data with existing understanding:
- String fields are overwritten if provided
- List fields are appended (with deduplication)
"""
session_id = session.session_id
if not user_id:
return ErrorResponse(
message="Authentication required to save business understanding.",
session_id=session_id,
)
# Check if any data was provided
if not any(v is not None for v in kwargs.values()):
return ErrorResponse(
message="Please provide at least one field to update.",
session_id=session_id,
)
# Build input model from kwargs (only include fields defined in the model)
valid_fields = set(BusinessUnderstandingInput.model_fields.keys())
input_data = BusinessUnderstandingInput(
**{k: v for k, v in kwargs.items() if k in valid_fields}
)
# Track which fields were updated
updated_fields = [
k for k, v in kwargs.items() if k in valid_fields and v is not None
]
# Upsert with merge
understanding = await upsert_business_understanding(user_id, input_data)
# Build current understanding summary (filter out empty values)
current_understanding = {
k: v
for k, v in understanding.model_dump(
exclude={"id", "user_id", "created_at", "updated_at"}
).items()
if v is not None and v != [] and v != ""
}
return UnderstandingUpdatedResponse(
message=f"Updated understanding with: {', '.join(updated_fields)}. "
"I now have a better picture of your business context.",
session_id=session_id,
updated_fields=updated_fields,
current_understanding=current_understanding,
)
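
The parameters property above flattens Pydantic's anyOf encoding of Optional fields into plain JSON-schema types for the OpenAI tools API. A stand-in sketch of the effect; the model here is hypothetical, not the real BusinessUnderstandingInput:

from pydantic import BaseModel, Field

class DemoInput(BaseModel):
    industry: str | None = Field(None, description="User's industry")
    pain_points: list[str] | None = Field(None, description="Known pain points")

schema = DemoInput.model_json_schema()
# Pydantic emits {"anyOf": [{"type": "string"}, {"type": "null"}]} for
# "industry"; the flattening keeps the first non-null option, yielding:
# {"industry": {"description": "User's industry", "type": "string"},
#  "pain_points": {"description": "Known pain points", "type": "array",
#                  "items": {"type": "string"}}}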

View File

@@ -1,29 +0,0 @@
"""Agent generator package - Creates agents from natural language."""
from .core import (
apply_agent_patch,
decompose_goal,
generate_agent,
generate_agent_patch,
get_agent_as_json,
save_agent_to_library,
)
from .fixer import apply_all_fixes
from .utils import get_blocks_info
from .validator import validate_agent
__all__ = [
# Core functions
"decompose_goal",
"generate_agent",
"generate_agent_patch",
"apply_agent_patch",
"save_agent_to_library",
"get_agent_as_json",
# Fixer
"apply_all_fixes",
# Validator
"validate_agent",
# Utils
"get_blocks_info",
]

View File

@@ -1,25 +0,0 @@
"""OpenRouter client configuration for agent generation."""
import os
from openai import AsyncOpenAI
# Configuration - use OPEN_ROUTER_API_KEY for consistency with chat/config.py
OPENROUTER_API_KEY = os.getenv("OPEN_ROUTER_API_KEY")
AGENT_GENERATOR_MODEL = os.getenv("AGENT_GENERATOR_MODEL", "anthropic/claude-opus-4.5")
# OpenRouter client (OpenAI-compatible API)
_client: AsyncOpenAI | None = None
def get_client() -> AsyncOpenAI:
"""Get or create the OpenRouter client."""
global _client
if _client is None:
if not OPENROUTER_API_KEY:
raise ValueError("OPENROUTER_API_KEY environment variable is required")
_client = AsyncOpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=OPENROUTER_API_KEY,
)
return _client

View File

@@ -1,390 +0,0 @@
"""Core agent generation functions."""
import copy
import json
import logging
import uuid
from typing import Any
from backend.api.features.library import db as library_db
from backend.data.graph import Graph, Link, Node, create_graph
from .client import AGENT_GENERATOR_MODEL, get_client
from .prompts import DECOMPOSITION_PROMPT, GENERATION_PROMPT, PATCH_PROMPT
from .utils import get_block_summaries, parse_json_from_llm
logger = logging.getLogger(__name__)
async def decompose_goal(description: str, context: str = "") -> dict[str, Any] | None:
"""Break down a goal into steps or return clarifying questions.
Args:
description: Natural language goal description
context: Additional context (e.g., answers to previous questions)
Returns:
Dict with either:
- {"type": "clarifying_questions", "questions": [...]}
- {"type": "instructions", "steps": [...]}
Or None on error
"""
client = get_client()
prompt = DECOMPOSITION_PROMPT.format(block_summaries=get_block_summaries())
full_description = description
if context:
full_description = f"{description}\n\nAdditional context:\n{context}"
try:
response = await client.chat.completions.create(
model=AGENT_GENERATOR_MODEL,
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": full_description},
],
temperature=0,
)
content = response.choices[0].message.content
if content is None:
logger.error("LLM returned empty content for decomposition")
return None
result = parse_json_from_llm(content)
if result is None:
logger.error(f"Failed to parse decomposition response: {content[:200]}")
return None
return result
except Exception as e:
logger.error(f"Error decomposing goal: {e}")
return None
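
Illustrative return shapes for decompose_goal, following the docstring above; the field contents are hypothetical examples.

needs_info = {
    "type": "clarifying_questions",
    "questions": ["Which email provider do you use?"],
}
ready = {
    "type": "instructions",
    "steps": ["Fetch unread email", "Summarize with an LLM", "Post to Slack"],
}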
async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None:
"""Generate agent JSON from instructions.
Args:
instructions: Structured instructions from decompose_goal
Returns:
Agent JSON dict or None on error
"""
client = get_client()
prompt = GENERATION_PROMPT.format(block_summaries=get_block_summaries())
try:
response = await client.chat.completions.create(
model=AGENT_GENERATOR_MODEL,
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": json.dumps(instructions, indent=2)},
],
temperature=0,
)
content = response.choices[0].message.content
if content is None:
logger.error("LLM returned empty content for agent generation")
return None
result = parse_json_from_llm(content)
if result is None:
logger.error(f"Failed to parse agent JSON: {content[:200]}")
return None
# Ensure required fields
if "id" not in result:
result["id"] = str(uuid.uuid4())
if "version" not in result:
result["version"] = 1
if "is_active" not in result:
result["is_active"] = True
return result
except Exception as e:
logger.error(f"Error generating agent: {e}")
return None
def json_to_graph(agent_json: dict[str, Any]) -> Graph:
"""Convert agent JSON dict to Graph model.
Args:
agent_json: Agent JSON with nodes and links
Returns:
Graph ready for saving
"""
nodes = []
for n in agent_json.get("nodes", []):
node = Node(
id=n.get("id", str(uuid.uuid4())),
block_id=n["block_id"],
input_default=n.get("input_default", {}),
metadata=n.get("metadata", {}),
)
nodes.append(node)
links = []
for link_data in agent_json.get("links", []):
link = Link(
id=link_data.get("id", str(uuid.uuid4())),
source_id=link_data["source_id"],
sink_id=link_data["sink_id"],
source_name=link_data["source_name"],
sink_name=link_data["sink_name"],
is_static=link_data.get("is_static", False),
)
links.append(link)
return Graph(
id=agent_json.get("id", str(uuid.uuid4())),
version=agent_json.get("version", 1),
is_active=agent_json.get("is_active", True),
name=agent_json.get("name", "Generated Agent"),
description=agent_json.get("description", ""),
nodes=nodes,
links=links,
)
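
For reference, an illustrative agent JSON matching the fields json_to_graph reads; the block_id values are placeholders, not real blocks.

agent_json = {
    "id": "d6f0c7e2-0000-0000-0000-000000000000",  # optional; generated if absent
    "name": "Email Digest",
    "description": "Summarize unread email daily",
    "version": 1,
    "is_active": True,
    "nodes": [
        {"id": "n1", "block_id": "<block-uuid>", "input_default": {}, "metadata": {}},
        {"id": "n2", "block_id": "<block-uuid>", "input_default": {}, "metadata": {}},
    ],
    "links": [
        {"id": "l1", "source_id": "n1", "sink_id": "n2",
         "source_name": "output", "sink_name": "input", "is_static": False},
    ],
}
graph = json_to_graph(agent_json)  # Graph ready for create_graph()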
def _reassign_node_ids(graph: Graph) -> None:
"""Reassign all node and link IDs to new UUIDs.
This is needed when creating a new version to avoid unique constraint violations.
"""
# Create mapping from old node IDs to new UUIDs
id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes}
# Reassign node IDs
for node in graph.nodes:
node.id = id_map[node.id]
# Update link references to use new node IDs
for link in graph.links:
link.id = str(uuid.uuid4()) # Also give links new IDs
if link.source_id in id_map:
link.source_id = id_map[link.source_id]
if link.sink_id in id_map:
link.sink_id = id_map[link.sink_id]
async def save_agent_to_library(
agent_json: dict[str, Any], user_id: str, is_update: bool = False
) -> tuple[Graph, Any]:
"""Save agent to database and user's library.
Args:
agent_json: Agent JSON dict
user_id: User ID
is_update: Whether this is an update to an existing agent
Returns:
Tuple of (created Graph, LibraryAgent)
"""
from backend.data.graph import get_graph_all_versions
graph = json_to_graph(agent_json)
if is_update:
# For updates, keep the same graph ID but increment version
# and reassign node/link IDs to avoid conflicts
if graph.id:
existing_versions = await get_graph_all_versions(graph.id, user_id)
if existing_versions:
latest_version = max(v.version for v in existing_versions)
graph.version = latest_version + 1
# Reassign node IDs (but keep graph ID the same)
_reassign_node_ids(graph)
logger.info(f"Updating agent {graph.id} to version {graph.version}")
else:
# For new agents, always generate a fresh UUID to avoid collisions
graph.id = str(uuid.uuid4())
graph.version = 1
# Reassign all node IDs as well
_reassign_node_ids(graph)
logger.info(f"Creating new agent with ID {graph.id}")
# Save to database
created_graph = await create_graph(graph, user_id)
# Add to user's library (or update existing library agent)
library_agents = await library_db.create_library_agent(
graph=created_graph,
user_id=user_id,
create_library_agents_for_sub_graphs=False,
)
return created_graph, library_agents[0]
async def get_agent_as_json(
graph_id: str, user_id: str | None
) -> dict[str, Any] | None:
"""Fetch an agent and convert to JSON format for editing.
Args:
graph_id: Graph ID or library agent ID
user_id: User ID
Returns:
Agent as JSON dict or None if not found
"""
from backend.data.graph import get_graph
# Try to get the graph (version=None gets the active version)
graph = await get_graph(graph_id, version=None, user_id=user_id)
if not graph:
return None
# Convert to JSON format
nodes = []
for node in graph.nodes:
nodes.append(
{
"id": node.id,
"block_id": node.block_id,
"input_default": node.input_default,
"metadata": node.metadata,
}
)
links = []
for node in graph.nodes:
for link in node.output_links:
links.append(
{
"id": link.id,
"source_id": link.source_id,
"sink_id": link.sink_id,
"source_name": link.source_name,
"sink_name": link.sink_name,
"is_static": link.is_static,
}
)
return {
"id": graph.id,
"name": graph.name,
"description": graph.description,
"version": graph.version,
"is_active": graph.is_active,
"nodes": nodes,
"links": links,
}
async def generate_agent_patch(
update_request: str, current_agent: dict[str, Any]
) -> dict[str, Any] | None:
"""Generate a patch to update an existing agent.
Args:
update_request: Natural language description of changes
current_agent: Current agent JSON
Returns:
Patch dict or clarifying questions, or None on error
"""
client = get_client()
prompt = PATCH_PROMPT.format(
current_agent=json.dumps(current_agent, indent=2),
block_summaries=get_block_summaries(),
)
try:
response = await client.chat.completions.create(
model=AGENT_GENERATOR_MODEL,
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": update_request},
],
temperature=0,
)
content = response.choices[0].message.content
if content is None:
logger.error("LLM returned empty content for patch generation")
return None
return parse_json_from_llm(content)
except Exception as e:
logger.error(f"Error generating patch: {e}")
return None
def apply_agent_patch(
current_agent: dict[str, Any], patch: dict[str, Any]
) -> dict[str, Any]:
"""Apply a patch to an existing agent.
Args:
current_agent: Current agent JSON
patch: Patch dict with operations
Returns:
Updated agent JSON
"""
agent = copy.deepcopy(current_agent)
patches = patch.get("patches", [])
for p in patches:
patch_type = p.get("type")
if patch_type == "modify":
node_id = p.get("node_id")
changes = p.get("changes", {})
for node in agent.get("nodes", []):
if node["id"] == node_id:
_deep_update(node, changes)
logger.debug(f"Modified node {node_id}")
break
elif patch_type == "add":
new_nodes = p.get("new_nodes", [])
new_links = p.get("new_links", [])
agent["nodes"] = agent.get("nodes", []) + new_nodes
agent["links"] = agent.get("links", []) + new_links
logger.debug(f"Added {len(new_nodes)} nodes, {len(new_links)} links")
elif patch_type == "remove":
node_ids_to_remove = set(p.get("node_ids", []))
link_ids_to_remove = set(p.get("link_ids", []))
# Remove nodes
agent["nodes"] = [
n for n in agent.get("nodes", []) if n["id"] not in node_ids_to_remove
]
# Remove links (both explicit and those referencing removed nodes)
agent["links"] = [
link
for link in agent.get("links", [])
if link["id"] not in link_ids_to_remove
and link["source_id"] not in node_ids_to_remove
and link["sink_id"] not in node_ids_to_remove
]
logger.debug(
f"Removed {len(node_ids_to_remove)} nodes, {len(link_ids_to_remove)} links"
)
return agent
def _deep_update(target: dict, source: dict) -> None:
"""Recursively update a dict with another dict."""
for key, value in source.items():
if key in target and isinstance(target[key], dict) and isinstance(value, dict):
_deep_update(target[key], value)
else:
target[key] = value
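
A worked example of apply_agent_patch using the patch shapes handled above ("modify" and "remove"); values are illustrative.

current = {
    "nodes": [
        {"id": "n1", "block_id": "b1", "input_default": {"limit": 5}},
        {"id": "n2", "block_id": "b2", "input_default": {}},
    ],
    "links": [
        {"id": "l1", "source_id": "n1", "sink_id": "n2",
         "source_name": "out", "sink_name": "in", "is_static": False},
    ],
}
patch = {
    "patches": [
        {"type": "modify", "node_id": "n1",
         "changes": {"input_default": {"limit": 10}}},
        {"type": "remove", "node_ids": ["n2"], "link_ids": []},
    ],
}
updated = apply_agent_patch(current, patch)
# n1's input_default["limit"] is now 10 (deep-merged); n2 is gone, and l1
# is dropped automatically because it referenced the removed node.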

Some files were not shown because too many files have changed in this diff.