mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-10 23:58:06 -05:00
Merge branch 'dev' into bently/open-2299-name-of-downloaded-agent-in-library-is-different-from-name
This commit is contained in:
2
.github/workflows/claude-dependabot.yml
vendored
2
.github/workflows/claude-dependabot.yml
vendored
@@ -80,7 +80,7 @@ jobs:
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
node-version: "22"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
8
.github/workflows/claude.yml
vendored
8
.github/workflows/claude.yml
vendored
@@ -44,6 +44,12 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Free Disk Space (Ubuntu)
|
||||
uses: jlumbroso/free-disk-space@v1.3.1
|
||||
with:
|
||||
large-packages: false # slow
|
||||
docker-images: false # limited benefit
|
||||
|
||||
# Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
@@ -90,7 +96,7 @@ jobs:
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
node-version: "22"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
4
.github/workflows/copilot-setup-steps.yml
vendored
4
.github/workflows/copilot-setup-steps.yml
vendored
@@ -78,7 +78,7 @@ jobs:
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "21"
|
||||
node-version: "22"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
@@ -299,4 +299,4 @@ jobs:
|
||||
echo "✅ AutoGPT Platform development environment setup complete!"
|
||||
echo "🚀 Ready for development with Docker services running"
|
||||
echo "📝 Backend server: poetry run serve (port 8000)"
|
||||
echo "🌐 Frontend server: pnpm dev (port 3000)"
|
||||
echo "🌐 Frontend server: pnpm dev (port 3000)"
|
||||
|
||||
4
.github/workflows/platform-frontend-ci.yml
vendored
4
.github/workflows/platform-frontend-ci.yml
vendored
@@ -12,6 +12,10 @@ on:
|
||||
- "autogpt_platform/frontend/**"
|
||||
merge_group:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
4
.github/workflows/platform-fullstack-ci.yml
vendored
4
.github/workflows/platform-fullstack-ci.yml
vendored
@@ -12,6 +12,10 @@ on:
|
||||
- "autogpt_platform/**"
|
||||
merge_group:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
2
.github/workflows/repo-pr-label.yml
vendored
2
.github/workflows/repo-pr-label.yml
vendored
@@ -61,6 +61,6 @@ jobs:
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/labeler@v5
|
||||
- uses: actions/labeler@v6
|
||||
with:
|
||||
sync-labels: true
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend
|
||||
.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend load-store-agents
|
||||
|
||||
# Run just Supabase + Redis + RabbitMQ
|
||||
start-core:
|
||||
@@ -42,7 +42,10 @@ run-frontend:
|
||||
|
||||
test-data:
|
||||
cd backend && poetry run python test/test_data_creator.py
|
||||
|
||||
|
||||
load-store-agents:
|
||||
cd backend && poetry run load-store-agents
|
||||
|
||||
help:
|
||||
@echo "Usage: make <target>"
|
||||
@echo "Targets:"
|
||||
@@ -54,4 +57,5 @@ help:
|
||||
@echo " migrate - Run backend database migrations"
|
||||
@echo " run-backend - Run the backend FastAPI server"
|
||||
@echo " run-frontend - Run the frontend Next.js development server"
|
||||
@echo " test-data - Run the test data creator"
|
||||
@echo " test-data - Run the test data creator"
|
||||
@echo " load-store-agents - Load store agents from agents/ folder into test database"
|
||||
@@ -134,13 +134,6 @@ POSTMARK_WEBHOOK_TOKEN=
|
||||
# Error Tracking
|
||||
SENTRY_DSN=
|
||||
|
||||
# Cloudflare Turnstile (CAPTCHA) Configuration
|
||||
# Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
|
||||
# This is the backend secret key
|
||||
TURNSTILE_SECRET_KEY=
|
||||
# This is the verify URL
|
||||
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify
|
||||
|
||||
# Feature Flags
|
||||
LAUNCH_DARKLY_SDK_KEY=
|
||||
|
||||
|
||||
242
autogpt_platform/backend/agents/StoreAgent_rows.csv
Normal file
242
autogpt_platform/backend/agents/StoreAgent_rows.csv
Normal file
@@ -0,0 +1,242 @@
|
||||
listing_id,storeListingVersionId,slug,agent_name,agent_video,agent_image,featured,sub_heading,description,categories,useForOnboarding,is_available
|
||||
6e60a900-9d7d-490e-9af2-a194827ed632,d85882b8-633f-44ce-a315-c20a8c123d19,flux-ai-image-generator,Flux AI Image Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ca154dd1-140e-454c-91bd-2d8a00de3f08.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/577d995d-bc38-40a9-a23f-1f30f5774bdb.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/415db1b7-115c-43ab-bd6c-4e9f7ef95be1.jpg""]",false,Transform ideas into breathtaking images,"Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!","[""creative""]",false,true
|
||||
f11fc6e9-6166-4676-ac5d-f07127b270c1,c775f60d-b99f-418b-8fe0-53172258c3ce,youtube-transcription-scraper,YouTube Transcription Scraper,https://youtu.be/H8S3pU68lGE,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/65bce54b-0124-4b0d-9e3e-f9b89d0dc99e.jpg""]",false,Fetch the transcriptions from the most popular YouTube videos in your chosen topic,"Effortlessly gather transcriptions from multiple YouTube videos with this agent. It scrapes and compiles video transcripts into a clean, organized list, making it easy to extract insights, quotes, or content from various sources in one go. Ideal for researchers, content creators, and marketers looking to quickly analyze or repurpose video content.","[""writing""]",false,true
|
||||
17908889-b599-4010-8e4f-bed19b8f3446,6e16e65a-ad34-4108-b4fd-4a23fced5ea2,business-ownerceo-finder,Decision Maker Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/1020d94e-b6a2-4fa7-bbdf-2c218b0de563.jpg""]",false,Contact CEOs today,"Find the key decision-makers you need, fast.
|
||||
|
||||
This agent identifies business owners or CEOs of local companies in any area you choose. Simply enter what kind of businesses you’re looking for and where, and it will:
|
||||
|
||||
* Search the area and gather public information
|
||||
* Return names, roles, and contact details when available
|
||||
* Provide smart Google search suggestions if details aren’t found
|
||||
|
||||
Perfect for:
|
||||
|
||||
* B2B sales teams seeking verified leads
|
||||
* Recruiters sourcing local talent
|
||||
* Researchers looking to connect with business leaders
|
||||
|
||||
Save hours of manual searching and get straight to the people who matter most.","[""business""]",true,true
|
||||
72beca1d-45ea-4403-a7ce-e2af168ee428,415b7352-0dc6-4214-9d87-0ad3751b711d,smart-meeting-brief,Smart Meeting Prep,https://youtu.be/9ydZR2hkxaY,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2f116ce1-63ae-4d39-a5cd-f514defc2b97.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0a71a60a-2263-4f12-9836-9c76ab49f155.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/95327695-9184-403c-907a-a9d3bdafa6a5.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2bc77788-790b-47d4-8a61-ce97b695e9f5.png""]",true,Business meeting briefings delivered daily,"Never walk into a meeting unprepared again. Every day at 4 pm, the Smart Meeting Prep Agent scans your calendar for tomorrow's external meetings. It reviews your past email exchanges, researches each participant's background and role, and compiles the insights into a concise briefing, so you can close your workday ready for tomorrow's calls.
|
||||
|
||||
How It Works
|
||||
1. At 4 pm, the agent scans your calendar and identifies external meetings scheduled for the next day.
|
||||
2. It reviews recent email threads with each participant to surface key relationship history and communication context.
|
||||
3. It conducts online research to gather publicly available information on roles, company backgrounds, and relevant professional data.
|
||||
4. It produces a unified briefing for each participant, including past exchange highlights, profile notes, and strategic conversation points.","[""personal""]",true,true
|
||||
9fa5697a-617b-4fae-aea0-7dbbed279976,b8ceb480-a7a2-4c90-8513-181a49f7071f,automated-support-ai,Automated Support Agent,https://youtu.be/nBMfu_5sgDA,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ed56febc-2205-4179-9e7e-505d8500b66c.png""]",true,Automate up to 80 percent of inbound support emails,"Overview:
|
||||
Support teams spend countless hours on basic tickets. This agent automates repetitive customer support tasks. It reads incoming requests, researches your knowledge base, and responds automatically when confident. When unsure, it escalates to a human for final resolution.
|
||||
|
||||
How it Works:
|
||||
New support emails are routed to the agent.
|
||||
The agent checks internal documentation for answers.
|
||||
It measures confidence in the answer found and either replies directly or escalates to a human.
|
||||
|
||||
Business Value:
|
||||
Automating the easy 80 percent of support tickets allows your team to focus on high-value, complex customer issues, improving efficiency and response times.","[""business""]",false,true
|
||||
2bdac92b-a12c-4131-bb46-0e3b89f61413,31daf49d-31d3-476b-aa4c-099abc59b458,unspirational-poster-maker,Unspirational Poster Maker,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a490dac-27e5-405f-a4c4-8d1c55b85060.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d343fbb5-478c-4e38-94df-4337293b61f1.jpg""]",false,Because adulting is hard,"This witty AI agent generates hilariously relatable ""motivational"" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clichés and embrace our collective struggles to ""get it together."" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.","[""creative""]",false,true
|
||||
9adf005e-2854-4cc7-98cf-f7103b92a7b7,a03b0d8c-4751-43d6-a54e-c3b7856ba4e3,ai-shortform-video-generator-create-viral-ready-content,AI Video Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/8d2670b9-fea5-4966-a597-0a4511bffdc3.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/aabe8aec-0110-4ce7-a259-4f86fe8fe07d.png""]",false,Create Viral-Ready Shorts Content in Seconds,"OVERVIEW
|
||||
Transform any trending headline or broad topic into a polished, vertical short-form video in a single run.
|
||||
The agent automates research, scriptwriting, metadata creation, and Revid.ai rendering, returning one ready-to-publish MP4 plus its title, script and hashtags.
|
||||
|
||||
HOW IT WORKS
|
||||
1. Input a topic or an exact news headline.
|
||||
2. The agent fetches live search results and selects the most engaging related story.
|
||||
3. Key facts are summarised into concise research notes.
|
||||
4. Claude writes a 30–35 second script with visual cues, a three-second hook, tension loops, and a call-to-action.
|
||||
5. GPT-4o generates an eye-catching title and one or two discoverability hashtags.
|
||||
6. The script is sent to a state-of-the-art AI video generator to render a single 9:16 MP4 (default: 720 p, 30 fps, voice “Brian”, style “movingImage”, music “Bladerunner 2049”).
|
||||
– All voice, style and resolution settings can be adjusted in the Builder before you press ""Run"".
|
||||
7. Output delivered: Title, Script, Hashtags, Video URL.
|
||||
|
||||
KEY USE CASES
|
||||
- Broad-topic explainers (e.g. “Artificial Intelligence” or “Climate Tech”).
|
||||
- Real-time newsjacking with a specific breaking headline.
|
||||
- Product-launch spotlights and quick event recaps while interest is high.
|
||||
|
||||
BUSINESS VALUE
|
||||
- One-click speed: from idea to finished video in minutes.
|
||||
- Consistent brand look: Revid presets keep voice, style and aspect ratio on spec.
|
||||
- No-code workflow: marketers create social video without design or development queues.
|
||||
- Cloud convenience: Auto-GPT Cloud users are pre-configured with all required keys.
|
||||
Self-hosted users simply add OpenAI, Anthropic, Perplexity (OpenRouter/Jina) and Revid keys once.
|
||||
|
||||
IMPORTANT NOTES
|
||||
- The agent outputs exactly one video per execution. Run it again for additional shorts.
|
||||
- Video rendering time varies; AI-generated footage may take several minutes.","[""writing""]",false,true
|
||||
864e48ef-fee5-42c1-b6a4-2ae139db9fc1,55d40473-0f31-4ada-9e40-d3a7139fcbd4,automated-blog-writer,Automated SEO Blog Writer,https://youtu.be/nKcDCbDVobs,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2dd5f95b-5b30-4bf8-a11b-bac776c5141a.jpg""]",true,"Automate research, writing, and publishing for high-ranking blog posts","Scale your blog with a fully automated content engine. The Automated SEO Blog Writer learns your brand voice, finds high-demand keywords, and creates SEO-optimized articles that attract organic traffic and boost visibility.
|
||||
|
||||
How it works:
|
||||
|
||||
1. Share your pitch, website, and values.
|
||||
2. The agent studies your site and uncovers proven SEO opportunities.
|
||||
3. It spends two hours researching and drafting each post.
|
||||
4. You set the cadence—publishing runs on autopilot.
|
||||
|
||||
Business value: Consistently publish research-backed, optimized posts that build domain authority, rankings, and thought leadership while you focus on what matters most.
|
||||
|
||||
Use cases:
|
||||
• Founders: Keep your blog active with no time drain.
|
||||
• Agencies: Deliver scalable SEO content for clients.
|
||||
• Strategists: Automate execution, focus on strategy.
|
||||
• Marketers: Drive steady organic growth.
|
||||
• Local businesses: Capture nearby search traffic.","[""writing""]",false,true
|
||||
6046f42e-eb84-406f-bae0-8e052064a4fa,a548e507-09a7-4b30-909c-f63fcda10fff,lead-finder-local-businesses,Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/abd6605f-d5f8-426b-af36-052e8ba5044f.webp""]",false,Auto-Prospect Like a Pro,"Turbo-charge your local lead generation with the AutoGPT Marketplace’s top Google Maps prospecting agent. “Lead Finder: Local Businesses” delivers verified, ready-to-contact prospects in any niche and city—so you can focus on closing, not searching.
|
||||
|
||||
**WHAT IT DOES**
|
||||
• Searches Google Maps via the official API (no scraping)
|
||||
• Prompts like “dentists in Chicago” or “coffee shops near me”
|
||||
• Returns: Name, Website, Rating, Reviews, **Phone & Address**
|
||||
• Exports instantly to your CRM, sheet, or outreach workflow
|
||||
|
||||
**WHY YOU’LL LOVE IT**
|
||||
✓ Hyper-targeted leads in minutes
|
||||
✓ Unlimited searches & locations
|
||||
✓ Zero CAPTCHAs or IP blocks
|
||||
✓ Works on AutoGPT Cloud or self-hosted (with your API key)
|
||||
✓ Cut prospecting time by 90%
|
||||
|
||||
**PERFECT FOR**
|
||||
— Marketers & PPC agencies
|
||||
— SEO consultants & designers
|
||||
— SaaS founders & sales teams
|
||||
|
||||
Stop scrolling directories—start filling your pipeline. Start now and let AI prospect while you profit.
|
||||
|
||||
→ Click *Add to Library* and own your market today.","[""business""]",true,true
|
||||
f623c862-24e9-44fc-8ce8-d8282bb51ad2,eafa21d3-bf14-4f63-a97f-a5ee41df83b3,linkedin-post-generator,LinkedIn Post Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/297f6a8e-81a8-43e2-b106-c7ad4a5662df.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/fceebdc1-aef6-4000-97fc-4ef587f56bda.png""]",false,Auto‑craft LinkedIn gold,"Create research‑driven, high‑impact LinkedIn posts in minutes. This agent searches YouTube for the best videos on your chosen topic, pulls their transcripts, and distils the most valuable insights into a polished post ready for your company page or personal feed.
|
||||
|
||||
FEATURES
|
||||
• Automated YouTube research – discovers and analyses top‑ranked videos so you don’t have to
|
||||
• AI‑curated synthesis – combines multiple transcripts into one authoritative narrative
|
||||
• Full creative control – adjust style, tone, objective, opinion, clarity, target word count and number of videos
|
||||
• LinkedIn‑optimised output – hook, 2‑3 key points, CTA, strategic line breaks, 3‑5 hashtags, no markdown
|
||||
• One‑click publish – returns a ready‑to‑post text block (≤1 300 characters)
|
||||
|
||||
HOW IT WORKS
|
||||
1. Enter a topic and your preferred writing parameters.
|
||||
2. The agent builds a YouTube search, fetches the page, and extracts the top N video URLs.
|
||||
3. It pulls each transcript, then feeds them—plus your settings—into Claude 3.5 Sonnet.
|
||||
4. The model writes a concise, engaging post designed for maximum LinkedIn engagement.
|
||||
|
||||
USE CASES
|
||||
• Thought‑leadership updates backed by fresh video research
|
||||
• Rapid industry summaries after major events, webinars, or conferences
|
||||
• Consistent LinkedIn content for busy founders, marketers, and creators
|
||||
|
||||
WHY YOU’LL LOVE IT
|
||||
Save hours of manual research, avoid surface‑level hot‑takes, and publish posts that showcase real expertise—without the heavy lift.","[""writing""]",true,true
|
||||
7d4120ad-b6b3-4419-8bdb-7dd7d350ef32,e7bb29a1-23c7-4fee-aa3b-5426174b8c52,youtube-to-linkedin-post-converter,YouTube to LinkedIn Post Converter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f084b326-a708-4396-be51-7ba59ad2ef32.png""]",false,Transform Your YouTube Videos into Engaging LinkedIn Posts with AI,"WHAT IT DOES:
|
||||
This agent converts YouTube video content into a LinkedIn post by analyzing the video's transcript. It provides you with a tailored post that reflects the core ideas, key takeaways, and tone of the original video, optimizing it for engagement on LinkedIn.
|
||||
|
||||
HOW IT WORKS:
|
||||
- You provide the URL to the YouTube video (required)
|
||||
- You can choose the structure for the LinkedIn post (e.g., Personal Achievement Story, Lesson Learned, Thought Leadership, etc.)
|
||||
- You can also select the tone (e.g., Inspirational, Analytical, Conversational, etc.)
|
||||
- The transcript of the video is analyzed by the GPT-4 model and the Claude 3.5 Sonnet model
|
||||
- The models extract key insights, memorable quotes, and the main points from the video
|
||||
- You’ll receive a LinkedIn post, formatted according to your chosen structure and tone, optimized for professional engagement
|
||||
|
||||
INPUTS:
|
||||
- Source YouTube Video – Provide the URL to the YouTube video
|
||||
- Structure – Choose the post format (e.g., Personal Achievement Story, Thought Leadership, etc.)
|
||||
- Content – Specify the main message or idea of the post (e.g., Hot Take, Key Takeaways, etc.)
|
||||
- Tone – Select the tone for the post (e.g., Conversational, Inspirational, etc.)
|
||||
|
||||
OUTPUT:
|
||||
- LinkedIn Post – A well-crafted, AI-generated LinkedIn post with a professional tone, based on the video content and your specified preferences
|
||||
|
||||
Perfect for content creators, marketers, and professionals who want to repurpose YouTube videos for LinkedIn and boost their professional branding.","[""writing""]",false,true
|
||||
c61d6a83-ea48-4df8-b447-3da2d9fe5814,00fdd42c-a14c-4d19-a567-65374ea0e87f,personalized-morning-coffee-newsletter,Personal Newsletter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f4b38e4c-8166-4caf-9411-96c9c4c82d4c.png""]",false,Start your day with personalized AI newsletters that deliver credibility and context for every interest or mood.,"This Personal Newsletter Agent provides a bespoke daily digest on your favorite topics and tone. Whether you prefer industry insights, lighthearted reads, or breaking news, this agent crafts your own unique newsletter to keep you informed and entertained.
|
||||
|
||||
|
||||
How It Works
|
||||
1. Enter your favorite topics, industries, or areas of interest.
|
||||
2. Choose your tone—professional, casual, or humorous.
|
||||
3. Set your preferred delivery cadence: daily or weekly.
|
||||
4. The agent scans top sources and compiles 3–5 engaging stories, insights, and fun facts into a conversational newsletter.
|
||||
|
||||
Skip the morning scroll and enjoy a thoughtfully curated newsletter designed just for you. Stay ahead of trends, spark creative ideas, and enjoy an effortless, informed start to your day.
|
||||
|
||||
|
||||
Use Cases
|
||||
• Executives: Get a daily digest of market updates and leadership insights.
|
||||
• Marketers: Receive curated creative trends and campaign inspiration.
|
||||
• Entrepreneurs: Stay updated on your industry without information overload.","[""research""]",true,true
|
||||
e2e49cfc-4a39-4d62-a6b3-c095f6d025ff,fc2c9976-0962-4625-a27b-d316573a9e7f,email-address-finder,Email Scout - Contact Finder Assistant,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/da8a690a-7a8b-4c1d-b6f8-e2f840c0205d.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a2ac25c-1609-4881-8140-e6da2421afb3.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/26179263-fe06-45bd-b6a0-0754660a0a46.jpg""]",false,Find contact details from name and location using AI search,"Finding someone's professional email address can be time-consuming and frustrating. Manual searching across multiple websites, social profiles, and business directories often leads to dead ends or outdated information.
|
||||
|
||||
Email Scout automates this process by intelligently searching across publicly available sources when you provide a person's name and location. Simply input basic information like ""Tim Cook, USA"" or ""Sarah Smith, London"" and let the AI assistant do the work of finding potential contact details.
|
||||
|
||||
Key Features:
|
||||
- Quick search from just name and location
|
||||
- Scans multiple public sources
|
||||
- Automated AI-powered search process
|
||||
- Easy to use with simple inputs
|
||||
|
||||
Perfect for recruiters, business development professionals, researchers, and anyone needing to establish professional contact.
|
||||
|
||||
Note: This tool searches only publicly available information. Search results depend on what contact information people have made public. Some searches may not yield results if the information isn't publicly accessible.","[""""]",false,true
|
||||
81bcc372-0922-4a36-bc35-f7b1e51d6939,e437cc95-e671-489d-b915-76561fba8c7f,ai-youtube-to-blog-converter,YouTube Video to SEO Blog Writer,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/239e5a41-2515-4e1c-96ef-31d0d37ecbeb.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/c7d96966-786f-4be6-ad7d-3a51c84efc0e.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0275a74c-e2c2-4e29-a6e4-3a616c3c35dd.png""]",false,One link. One click. One powerful blog post.,"Effortlessly transform your YouTube videos into high-quality, SEO-optimized blog posts.
|
||||
|
||||
Your videos deserve a second life—in writing.
|
||||
Make your content work twice as hard by repurposing it into engaging, searchable articles.
|
||||
|
||||
Perfect for content creators, marketers, and bloggers, this tool analyzes video content and generates well-structured blog posts tailored to your tone, audience, and word count. Just paste a YouTube URL and let the AI handle the rest.
|
||||
|
||||
FEATURES
|
||||
|
||||
• CONTENT ANALYSIS
|
||||
Extracts key points from the video while preserving your message and intent.
|
||||
|
||||
• CUSTOMIZABLE OUTPUT
|
||||
Select a tone that fits your audience: casual, professional, educational, or formal.
|
||||
|
||||
• SEO OPTIMIZATION
|
||||
Automatically creates engaging titles and structured subheadings for better search visibility.
|
||||
|
||||
• USER-FRIENDLY
|
||||
Repurpose your videos into written content to expand your reach and improve accessibility.
|
||||
|
||||
Whether you're looking to grow your blog, boost SEO, or simply get more out of your content, the AI YouTube-to-Blog Converter makes it effortless.
|
||||
","[""writing""]",true,true
|
||||
5c3510d2-fc8b-4053-8e19-67f53c86eb1a,f2cc74bb-f43f-4395-9c35-ecb30b5b4fc9,ai-webpage-copy-improver,AI Webpage Copy Improver,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d562d26f-5891-4b09-8859-fbb205972313.jpg""]",false,Boost Your Website's Search Engine Performance,"Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.","[""marketing""]",true,true
|
||||
94d03bd3-7d44-4d47-b60c-edb2f89508d6,b6f6f0d3-49f4-4e3b-8155-ffe9141b32c0,domain-name-finder,Domain Name Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/28545e09-b2b8-4916-b4c6-67f982510a78.jpeg""]",false,Instantly generate brand-ready domain names that are actually available,"Overview:
|
||||
Finding a domain name that fits your brand shouldn’t take hours of searching and failed checks. The Domain Name Finder Agent turns your pitch into hundreds of creative, brand-ready domain ideas—filtered by live availability so every result is actionable.
|
||||
|
||||
How It Works
|
||||
1. Input your product pitch, company name, or core keywords.
|
||||
2. The agent analyzes brand tone, audience, and industry context.
|
||||
3. It generates a list of unique, memorable domains that match your criteria.
|
||||
4. All names are pre-filtered for real-time availability, so you can register immediately.
|
||||
|
||||
|
||||
Business Value
|
||||
Save hours of guesswork and eliminate dead ends. Accelerate brand launches, startup naming, and campaign creation with ready-to-claim domains.
|
||||
|
||||
|
||||
Key Use Cases
|
||||
• Startup Founders: Quickly find brand-ready domains for MVP launches or rebrands.
|
||||
• Marketers: Test name options across campaigns with instant availability data.
|
||||
• Entrepreneurs: Validate ideas faster with instant domain options.","[""business""]",false,true
|
||||
7a831906-daab-426f-9d66-bcf98d869426,516d813b-d1bc-470f-add7-c63a4b2c2bad,ai-function,AI Function,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/620e8117-2ee1-4384-89e6-c2ef4ec3d9c9.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/476259e2-5a79-4a7b-8e70-deeebfca70d7.png""]",false,Never Code Again,"AI FUNCTION MAGIC
|
||||
Your AI‑powered assistant for turning plain‑English descriptions into working Python functions.
|
||||
|
||||
HOW IT WORKS
|
||||
1. Describe what the function should do.
|
||||
2. Specify the inputs it needs.
|
||||
3. Receive the generated Python code.
|
||||
|
||||
FEATURES
|
||||
- Effortless Function Generation: convert natural‑language specs into complete functions.
|
||||
- Customizable Inputs: define the parameters that matter to you.
|
||||
- Versatile Use Cases: simulate data, automate tasks, prototype ideas.
|
||||
- Seamless Integration: add the generated function directly to your codebase.
|
||||
|
||||
EXAMPLE
|
||||
Request: “Create a function that generates 20 examples of fake people, each with a name, date of birth, job title, and age.”
|
||||
Input parameter: number_of_people (default 20)
|
||||
Result: a list of dictionaries such as
|
||||
[
|
||||
{ ""name"": ""Emma Martinez"", ""date_of_birth"": ""1992‑11‑03"", ""job_title"": ""Data Analyst"", ""age"": 32 },
|
||||
{ ""name"": ""Liam O’Connor"", ""date_of_birth"": ""1985‑07‑19"", ""job_title"": ""Marketing Manager"", ""age"": 39 },
|
||||
…18 more entries…
|
||||
]","[""development""]",false,true
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,590 @@
|
||||
{
|
||||
"id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"version": 29,
|
||||
"is_active": false,
|
||||
"name": "Unspirational Poster Maker",
|
||||
"description": "This witty AI agent generates hilariously relatable \"motivational\" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clich\u00e9s and embrace our collective struggles to \"get it together.\" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.",
|
||||
"instructions": null,
|
||||
"recommended_schedule_cron": null,
|
||||
"nodes": [
|
||||
{
|
||||
"id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
"input_default": {
|
||||
"name": "Generated Image",
|
||||
"description": "The resulting generated image ready for you to review and post."
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 2329.937006807125,
|
||||
"y": 80.49068076698347
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
|
||||
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
|
||||
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
|
||||
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
|
||||
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [],
|
||||
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
|
||||
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
|
||||
"input_default": {
|
||||
"name": "Theme",
|
||||
"value": "Cooking"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -1219.5966324967521,
|
||||
"y": 80.50339731789956
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
|
||||
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
|
||||
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_THEME",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "28bda769-b88b-44c9-be5c-52c2667f137e",
|
||||
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
|
||||
"input_default": {
|
||||
"upscale": "No Upscale"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1132.373897280427,
|
||||
"y": 88.44610377514573
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "54588c74-e090-4e49-89e4-844b9952a585",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
|
||||
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
|
||||
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
|
||||
"input_default": {
|
||||
"upscale": "No Upscale"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 590.7543882245375,
|
||||
"y": 85.69546832466654
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
|
||||
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "576c5677-9050-4d1c-aad4-36b820c04fef",
|
||||
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
|
||||
"input_default": {
|
||||
"upscale": "No Upscale"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 60.48904654237981,
|
||||
"y": 86.06183359510214
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
|
||||
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
|
||||
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
|
||||
"input_default": {
|
||||
"prompt": "A cat sprawled dramatically across an important-looking document during a work-from-home meeting, making direct eye contact with the camera while knocking over a coffee mug in slow motion. Text Overlay: \"Chaos is a career path. Be the obstacle everyone has to work around.\"",
|
||||
"upscale": "No Upscale"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1668.3572666956795,
|
||||
"y": 89.69665262457966
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "509b7587-1940-4a06-808d-edde9a74f400",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
|
||||
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
"input_default": {
|
||||
"model": "gpt-4o",
|
||||
"prompt": "<example_output>\nA photo of a sloth lounging on a desk, with its head resting on a keyboard. The keyboard is on top of a laptop with a blank spreadsheet open. A to-do list is placed beside the laptop, with the top item written as \"Do literally anything\". There is a text overlay that says \"If you can't outwork them, outnap them.\".\n</example_output>\n\nCreate a relatable satirical, snarky, user-deprecating motivational style image based on the theme: \"{{THEME}}\".\n\nOutput only the image description and caption, without any additional commentary or formatting.",
|
||||
"prompt_values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -561.1139207164056,
|
||||
"y": 78.60434452403524
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
|
||||
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
|
||||
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_THEME",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "54588c74-e090-4e49-89e4-844b9952a585",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "509b7587-1940-4a06-808d-edde9a74f400",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
{
|
||||
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
|
||||
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
|
||||
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
|
||||
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
|
||||
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
|
||||
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_THEME",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
|
||||
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
|
||||
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "54588c74-e090-4e49-89e4-844b9952a585",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "509b7587-1940-4a06-808d-edde9a74f400",
|
||||
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
|
||||
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"sub_graphs": [],
|
||||
"user_id": "",
|
||||
"created_at": "2024-12-20T19:58:34.390Z",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Theme": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Theme",
|
||||
"default": "Cooking"
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
},
|
||||
"output_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Generated Image": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Generated Image",
|
||||
"description": "The resulting generated image ready for you to review and post."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"Generated Image"
|
||||
]
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"trigger_setup_info": null,
|
||||
"credentials_input_schema": {
|
||||
"properties": {
|
||||
"ideogram_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"ideogram"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "ideogram",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.IDEOGRAM: 'ideogram'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator_values": []
|
||||
},
|
||||
"openai_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"openai"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "openai",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator": "model",
|
||||
"discriminator_mapping": {
|
||||
"Llama-3.3-70B-Instruct": "llama_api",
|
||||
"Llama-3.3-8B-Instruct": "llama_api",
|
||||
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
|
||||
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
|
||||
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
|
||||
"amazon/nova-lite-v1": "open_router",
|
||||
"amazon/nova-micro-v1": "open_router",
|
||||
"amazon/nova-pro-v1": "open_router",
|
||||
"claude-3-7-sonnet-20250219": "anthropic",
|
||||
"claude-3-haiku-20240307": "anthropic",
|
||||
"claude-haiku-4-5-20251001": "anthropic",
|
||||
"claude-opus-4-1-20250805": "anthropic",
|
||||
"claude-opus-4-20250514": "anthropic",
|
||||
"claude-opus-4-5-20251101": "anthropic",
|
||||
"claude-sonnet-4-20250514": "anthropic",
|
||||
"claude-sonnet-4-5-20250929": "anthropic",
|
||||
"cohere/command-r-08-2024": "open_router",
|
||||
"cohere/command-r-plus-08-2024": "open_router",
|
||||
"deepseek/deepseek-chat": "open_router",
|
||||
"deepseek/deepseek-r1-0528": "open_router",
|
||||
"dolphin-mistral:latest": "ollama",
|
||||
"google/gemini-2.0-flash-001": "open_router",
|
||||
"google/gemini-2.0-flash-lite-001": "open_router",
|
||||
"google/gemini-2.5-flash": "open_router",
|
||||
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
|
||||
"google/gemini-2.5-pro-preview-03-25": "open_router",
|
||||
"google/gemini-3-pro-preview": "open_router",
|
||||
"gpt-3.5-turbo": "openai",
|
||||
"gpt-4-turbo": "openai",
|
||||
"gpt-4.1-2025-04-14": "openai",
|
||||
"gpt-4.1-mini-2025-04-14": "openai",
|
||||
"gpt-4o": "openai",
|
||||
"gpt-4o-mini": "openai",
|
||||
"gpt-5-2025-08-07": "openai",
|
||||
"gpt-5-chat-latest": "openai",
|
||||
"gpt-5-mini-2025-08-07": "openai",
|
||||
"gpt-5-nano-2025-08-07": "openai",
|
||||
"gpt-5.1-2025-11-13": "openai",
|
||||
"gryphe/mythomax-l2-13b": "open_router",
|
||||
"llama-3.1-8b-instant": "groq",
|
||||
"llama-3.3-70b-versatile": "groq",
|
||||
"llama3": "ollama",
|
||||
"llama3.1:405b": "ollama",
|
||||
"llama3.2": "ollama",
|
||||
"llama3.3": "ollama",
|
||||
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/llama-4-maverick": "open_router",
|
||||
"meta-llama/llama-4-scout": "open_router",
|
||||
"microsoft/wizardlm-2-8x22b": "open_router",
|
||||
"mistralai/mistral-nemo": "open_router",
|
||||
"moonshotai/kimi-k2": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
|
||||
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
|
||||
"o1": "openai",
|
||||
"o1-mini": "openai",
|
||||
"o3-2025-04-16": "openai",
|
||||
"o3-mini": "openai",
|
||||
"openai/gpt-oss-120b": "open_router",
|
||||
"openai/gpt-oss-20b": "open_router",
|
||||
"perplexity/sonar": "open_router",
|
||||
"perplexity/sonar-deep-research": "open_router",
|
||||
"perplexity/sonar-pro": "open_router",
|
||||
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
|
||||
"qwen/qwen3-coder": "open_router",
|
||||
"v0-1.0-md": "v0",
|
||||
"v0-1.5-lg": "v0",
|
||||
"v0-1.5-md": "v0",
|
||||
"x-ai/grok-4": "open_router",
|
||||
"x-ai/grok-4-fast": "open_router",
|
||||
"x-ai/grok-4.1-fast": "open_router",
|
||||
"x-ai/grok-code-fast-1": "open_router"
|
||||
},
|
||||
"discriminator_values": [
|
||||
"gpt-4o"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"ideogram_api_key_credentials",
|
||||
"openai_api_key_credentials"
|
||||
],
|
||||
"title": "UnspirationalPosterMakerCredentialsInputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,447 @@
|
||||
{
|
||||
"id": "622849a7-5848-4838-894d-01f8f07e3fad",
|
||||
"version": 18,
|
||||
"is_active": true,
|
||||
"name": "AI Function",
|
||||
"description": "## AI-Powered Function Magic: Never code again!\nProvide a description of a python function and your inputs and AI will provide the results.",
|
||||
"instructions": null,
|
||||
"recommended_schedule_cron": null,
|
||||
"nodes": [
|
||||
{
|
||||
"id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
|
||||
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
"input_default": {
|
||||
"name": "return",
|
||||
"title": null,
|
||||
"value": null,
|
||||
"format": "",
|
||||
"secret": false,
|
||||
"advanced": false,
|
||||
"description": "The value returned by the function"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1598.8622921127233,
|
||||
"y": 291.59140862204725
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
|
||||
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [],
|
||||
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
|
||||
"graph_version": 18,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
"input_default": {
|
||||
"model": "o3-mini",
|
||||
"retry": 3,
|
||||
"prompt": "{{ARGS}}",
|
||||
"sys_prompt": "You are now the following python function:\n\n```\n# {{DESCRIPTION}}\n{{FUNCTION}}\n```\n\nThe user will provide your input arguments.\nOnly respond with your `return` value.\nDo not include any commentary or additional text in your response. \nDo not include ``` backticks or any other decorators.",
|
||||
"ollama_host": "localhost:11434",
|
||||
"prompt_values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 995,
|
||||
"y": 290.50000000000006
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
|
||||
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_FUNCTION",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
|
||||
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_ARGS",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
|
||||
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_DESCRIPTION",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
|
||||
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
|
||||
"graph_version": 18,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
|
||||
"block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
|
||||
"input_default": {
|
||||
"name": "Function Definition",
|
||||
"title": null,
|
||||
"value": "def fake_people(n: int) -> list[dict]:",
|
||||
"secret": false,
|
||||
"advanced": false,
|
||||
"description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
|
||||
"placeholder_values": []
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -672.6908629664215,
|
||||
"y": 302.42044359789116
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
|
||||
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_FUNCTION",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
|
||||
"graph_version": 18,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "844530de-2354-46d8-b748-67306b7bbca1",
|
||||
"block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
|
||||
"input_default": {
|
||||
"name": "Arguments",
|
||||
"title": null,
|
||||
"value": "20",
|
||||
"secret": false,
|
||||
"advanced": false,
|
||||
"description": "The function's inputs\n\ne.g \"20\"",
|
||||
"placeholder_values": []
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -158.1623599617334,
|
||||
"y": 295.410856928333
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
|
||||
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_ARGS",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
|
||||
"graph_version": 18,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
|
||||
"block_id": "90a56ffb-7024-4b2b-ab50-e26c5e5ab8ba",
|
||||
"input_default": {
|
||||
"name": "Description",
|
||||
"title": null,
|
||||
"value": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.",
|
||||
"secret": false,
|
||||
"advanced": false,
|
||||
"description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
|
||||
"placeholder_values": []
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 374.4548658057796,
|
||||
"y": 290.3779121974126
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
|
||||
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_DESCRIPTION",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
|
||||
"graph_version": 18,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
{
|
||||
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
|
||||
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
|
||||
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_DESCRIPTION",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
|
||||
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_ARGS",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
|
||||
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_FUNCTION",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"sub_graphs": [],
|
||||
"user_id": "",
|
||||
"created_at": "2025-04-19T17:10:48.857Z",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Function Definition": {
|
||||
"advanced": false,
|
||||
"anyOf": [
|
||||
{
|
||||
"format": "short-text",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"secret": false,
|
||||
"title": "Function Definition",
|
||||
"description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
|
||||
"default": "def fake_people(n: int) -> list[dict]:"
|
||||
},
|
||||
"Arguments": {
|
||||
"advanced": false,
|
||||
"anyOf": [
|
||||
{
|
||||
"format": "short-text",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"secret": false,
|
||||
"title": "Arguments",
|
||||
"description": "The function's inputs\n\ne.g \"20\"",
|
||||
"default": "20"
|
||||
},
|
||||
"Description": {
|
||||
"advanced": false,
|
||||
"anyOf": [
|
||||
{
|
||||
"format": "long-text",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"secret": false,
|
||||
"title": "Description",
|
||||
"description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
|
||||
"default": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age."
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
},
|
||||
"output_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"return": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "return",
|
||||
"description": "The value returned by the function"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"return"
|
||||
]
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"trigger_setup_info": null,
|
||||
"credentials_input_schema": {
|
||||
"properties": {
|
||||
"openai_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"openai"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "openai",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator": "model",
|
||||
"discriminator_mapping": {
|
||||
"Llama-3.3-70B-Instruct": "llama_api",
|
||||
"Llama-3.3-8B-Instruct": "llama_api",
|
||||
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
|
||||
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
|
||||
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
|
||||
"amazon/nova-lite-v1": "open_router",
|
||||
"amazon/nova-micro-v1": "open_router",
|
||||
"amazon/nova-pro-v1": "open_router",
|
||||
"claude-3-7-sonnet-20250219": "anthropic",
|
||||
"claude-3-haiku-20240307": "anthropic",
|
||||
"claude-haiku-4-5-20251001": "anthropic",
|
||||
"claude-opus-4-1-20250805": "anthropic",
|
||||
"claude-opus-4-20250514": "anthropic",
|
||||
"claude-opus-4-5-20251101": "anthropic",
|
||||
"claude-sonnet-4-20250514": "anthropic",
|
||||
"claude-sonnet-4-5-20250929": "anthropic",
|
||||
"cohere/command-r-08-2024": "open_router",
|
||||
"cohere/command-r-plus-08-2024": "open_router",
|
||||
"deepseek/deepseek-chat": "open_router",
|
||||
"deepseek/deepseek-r1-0528": "open_router",
|
||||
"dolphin-mistral:latest": "ollama",
|
||||
"google/gemini-2.0-flash-001": "open_router",
|
||||
"google/gemini-2.0-flash-lite-001": "open_router",
|
||||
"google/gemini-2.5-flash": "open_router",
|
||||
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
|
||||
"google/gemini-2.5-pro-preview-03-25": "open_router",
|
||||
"google/gemini-3-pro-preview": "open_router",
|
||||
"gpt-3.5-turbo": "openai",
|
||||
"gpt-4-turbo": "openai",
|
||||
"gpt-4.1-2025-04-14": "openai",
|
||||
"gpt-4.1-mini-2025-04-14": "openai",
|
||||
"gpt-4o": "openai",
|
||||
"gpt-4o-mini": "openai",
|
||||
"gpt-5-2025-08-07": "openai",
|
||||
"gpt-5-chat-latest": "openai",
|
||||
"gpt-5-mini-2025-08-07": "openai",
|
||||
"gpt-5-nano-2025-08-07": "openai",
|
||||
"gpt-5.1-2025-11-13": "openai",
|
||||
"gryphe/mythomax-l2-13b": "open_router",
|
||||
"llama-3.1-8b-instant": "groq",
|
||||
"llama-3.3-70b-versatile": "groq",
|
||||
"llama3": "ollama",
|
||||
"llama3.1:405b": "ollama",
|
||||
"llama3.2": "ollama",
|
||||
"llama3.3": "ollama",
|
||||
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/llama-4-maverick": "open_router",
|
||||
"meta-llama/llama-4-scout": "open_router",
|
||||
"microsoft/wizardlm-2-8x22b": "open_router",
|
||||
"mistralai/mistral-nemo": "open_router",
|
||||
"moonshotai/kimi-k2": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
|
||||
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
|
||||
"o1": "openai",
|
||||
"o1-mini": "openai",
|
||||
"o3-2025-04-16": "openai",
|
||||
"o3-mini": "openai",
|
||||
"openai/gpt-oss-120b": "open_router",
|
||||
"openai/gpt-oss-20b": "open_router",
|
||||
"perplexity/sonar": "open_router",
|
||||
"perplexity/sonar-deep-research": "open_router",
|
||||
"perplexity/sonar-pro": "open_router",
|
||||
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
|
||||
"qwen/qwen3-coder": "open_router",
|
||||
"v0-1.0-md": "v0",
|
||||
"v0-1.5-lg": "v0",
|
||||
"v0-1.5-md": "v0",
|
||||
"x-ai/grok-4": "open_router",
|
||||
"x-ai/grok-4-fast": "open_router",
|
||||
"x-ai/grok-4.1-fast": "open_router",
|
||||
"x-ai/grok-code-fast-1": "open_router"
|
||||
},
|
||||
"discriminator_values": [
|
||||
"o3-mini"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"openai_api_key_credentials"
|
||||
],
|
||||
"title": "AIFunctionCredentialsInputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
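
Note: the agent graph above stores every link twice, once in the owning nodes' input_links/output_links and once in the top-level links array. A minimal consistency-check sketch for fixtures in this shape (plain Python; the filename is hypothetical, and this is not code from this repository):

import json

def check_graph_links(path: str) -> None:
    # Load an exported agent graph like the ones added in this diff.
    with open(path) as f:
        graph = json.load(f)

    node_ids = {node["id"] for node in graph["nodes"]}
    for link in graph["links"]:
        # Every link endpoint must be a node in the same graph.
        assert link["source_id"] in node_ids, f"unknown source {link['source_id']}"
        assert link["sink_id"] in node_ids, f"unknown sink {link['sink_id']}"

    # The top-level "links" array should mirror the per-node entries.
    per_node_ids = {
        link["id"]
        for node in graph["nodes"]
        for link in node["input_links"] + node["output_links"]
    }
    assert per_node_ids == {link["id"] for link in graph["links"]}

check_graph_links("ai_function_caller.json")  # hypothetical filename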
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,403 @@
{
  "id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
  "version": 12,
  "is_active": true,
  "name": "Flux AI Image Generator",
  "description": "Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "7482c59d-725f-4686-82b9-0dfdc4e92316",
      "block_id": "cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
      "input_default": {
        "text": "Press the \"Advanced\" toggle and input your replicate API key.\n\nYou can get one here:\nhttps://replicate.com/account/api-tokens\n"
      },
      "metadata": {
        "position": {
          "x": 872.8268131538296,
          "y": 614.9436919065381
        }
      },
      "input_links": [],
      "output_links": [],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Generated Image"
      },
      "metadata": {
        "position": {
          "x": 1453.6844137728922,
          "y": 963.2466395125115
        }
      },
      "input_links": [
        {
          "id": "06665d23-2f3d-4445-8f22-573446fcff5b",
          "source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Image Subject",
        "value": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks.",
        "description": "The subject of the image"
      },
      "metadata": {
        "position": {
          "x": -314.43009631839783,
          "y": 962.935949165938
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
          "source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
          "sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "source_name": "result",
          "sink_name": "prompt_values_#_TOPIC",
          "is_static": true
        }
      ],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
      "block_id": "90f8c45e-e983-4644-aa0b-b4ebe2f531bc",
      "input_default": {
        "prompt": "dog",
        "output_format": "png",
        "replicate_model_name": "Flux Pro 1.1"
      },
      "metadata": {
        "position": {
          "x": 873.0119949791526,
          "y": 966.1604399052493
        }
      },
      "input_links": [
        {
          "id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
          "source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "06665d23-2f3d-4445-8f22-573446fcff5b",
          "source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o-mini",
        "prompt": "Generate an incredibly detailed, photorealistic image prompt about {{TOPIC}}, describing the camera it's taken with and prompting the diffusion model to use all the best quality techniques.\n\nOutput only the prompt with no additional commentary.",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 277.3057034159709,
          "y": 962.8382498113764
        }
      },
      "input_links": [
        {
          "id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
          "source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
          "sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "source_name": "result",
          "sink_name": "prompt_values_#_TOPIC",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
          "source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
          "sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
      "source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
      "sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
      "source_name": "result",
      "sink_name": "prompt_values_#_TOPIC",
      "is_static": true
    },
    {
      "id": "06665d23-2f3d-4445-8f22-573446fcff5b",
      "source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
      "sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
      "source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
      "sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2024-12-20T18:46:11.492Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Image Subject": {
        "advanced": false,
        "secret": false,
        "title": "Image Subject",
        "description": "The subject of the image",
        "default": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks."
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "Generated Image": {
        "advanced": false,
        "secret": false,
        "title": "Generated Image"
      }
    },
    "required": [
      "Generated Image"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "replicate_api_key_credentials": {
        "credentials_provider": [
          "replicate"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "replicate",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.REPLICATE: 'replicate'>], Literal['api_key']]",
        "type": "object",
        "discriminator_values": []
      },
      "openai_api_key_credentials": {
        "credentials_provider": [
          "openai"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "openai",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "gpt-4o-mini"
        ]
      }
    },
    "required": [
      "replicate_api_key_credentials",
      "openai_api_key_credentials"
    ],
    "title": "FluxAIImageGeneratorCredentialsInputSchema",
    "type": "object"
  }
}
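
The prompt fields above use a {{PLACEHOLDER}} convention: a link whose sink pin is prompt_values_#_TOPIC supplies the value substituted for {{TOPIC}} in the template. A rough sketch of that substitution, assuming simple string replacement (the platform's actual implementation is not shown in this diff):

import re

def fill_prompt(template: str, values: dict[str, str]) -> str:
    # Replace each {{KEY}} with its value; unknown keys are left as-is.
    return re.sub(
        r"\{\{(\w+)\}\}",
        lambda m: values.get(m.group(1), m.group(0)),
        template,
    )

prompt = "Generate an incredibly detailed, photorealistic image prompt about {{TOPIC}}."
print(fill_prompt(prompt, {"TOPIC": "Otto the friendly, purple octopus"}))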
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,505 @@
{
  "id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
  "version": 12,
  "is_active": true,
  "name": "AI Webpage Copy Improver",
  "description": "Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Improved Webpage Copy"
      },
      "metadata": {
        "position": {
          "x": 1039.5884372540172,
          "y": -0.8359099621230968
        }
      },
      "input_links": [
        {
          "id": "d4334477-3616-454f-a430-614ca27f5b36",
          "source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Original Page Analysis",
        "description": "Analysis of the webpage as it currently stands."
      },
      "metadata": {
        "position": {
          "x": 1037.7724103954706,
          "y": -606.5934325506903
        }
      },
      "input_links": [
        {
          "id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Homepage URL",
        "value": "https://agpt.co",
        "description": "Enter the URL of the homepage you want to improve"
      },
      "metadata": {
        "position": {
          "x": -1195.1455674454749,
          "y": 0
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "cbb12335-fefd-4560-9fff-98675130fbad",
          "source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
          "sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "source_name": "result",
          "sink_name": "url",
          "is_static": true
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "block_id": "436c3984-57fd-4b85-8e9a-459b356883bd",
      "input_default": {
        "raw_content": false
      },
      "metadata": {
        "position": {
          "x": -631.7330786555249,
          "y": 1.9638396496230826
        }
      },
      "input_links": [
        {
          "id": "cbb12335-fefd-4560-9fff-98675130fbad",
          "source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
          "sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "source_name": "result",
          "sink_name": "url",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "adfa6113-77b3-4e32-b136-3e694b87553e",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        },
        {
          "id": "5d5656fd-4208-4296-bc70-e39cc31caada",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o",
        "prompt": "Current Webpage Content:\n```\n{{CONTENT}}\n```\n\nBased on the following analysis of the webpage content:\n\n```\n{{ANALYSIS}}\n```\n\nRewrite and improve the content to address the identified issues. Focus on:\n1. Enhancing clarity and readability\n2. Optimizing for SEO (suggest and incorporate relevant keywords)\n3. Improving calls-to-action for better conversion rates\n4. Refining the structure and organization\n5. Maintaining brand consistency while improving the overall tone\n\nProvide the improved content in HTML format inside a code-block with \"```\" backticks, preserving the original structure where appropriate. Also, include a brief summary of the changes made and their potential impact.",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 488.37278423303917,
          "y": 0
        }
      },
      "input_links": [
        {
          "id": "adfa6113-77b3-4e32-b136-3e694b87553e",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        },
        {
          "id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "response",
          "sink_name": "prompt_values_#_ANALYSIS",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "d4334477-3616-454f-a430-614ca27f5b36",
          "source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o",
        "prompt": "Analyze the following webpage content and provide a detailed report on its current state, including strengths and weaknesses in terms of clarity, SEO optimization, and potential for conversion:\n\n{{CONTENT}}\n\nInclude observations on:\n1. Overall readability and clarity\n2. Use of keywords and SEO-friendly language\n3. Effectiveness of calls-to-action\n4. Structure and organization of content\n5. Tone and brand consistency",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": -72.66206703605442,
          "y": -0.58403945075381
        }
      },
      "input_links": [
        {
          "id": "5d5656fd-4208-4296-bc70-e39cc31caada",
          "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
          "sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "source_name": "content",
          "sink_name": "prompt_values_#_CONTENT",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
          "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
          "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
          "source_name": "response",
          "sink_name": "prompt_values_#_ANALYSIS",
          "is_static": false
        }
      ],
      "graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
      "graph_version": 12,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "adfa6113-77b3-4e32-b136-3e694b87553e",
      "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "source_name": "content",
      "sink_name": "prompt_values_#_CONTENT",
      "is_static": false
    },
    {
      "id": "d4334477-3616-454f-a430-614ca27f5b36",
      "source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
      "source_name": "response",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "5d5656fd-4208-4296-bc70-e39cc31caada",
      "source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "source_name": "content",
      "sink_name": "prompt_values_#_CONTENT",
      "is_static": false
    },
    {
      "id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
      "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
      "source_name": "response",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
      "source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
      "sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
      "source_name": "response",
      "sink_name": "prompt_values_#_ANALYSIS",
      "is_static": false
    },
    {
      "id": "cbb12335-fefd-4560-9fff-98675130fbad",
      "source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
      "sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
      "source_name": "result",
      "sink_name": "url",
      "is_static": true
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2024-12-20T19:47:22.036Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Homepage URL": {
        "advanced": false,
        "secret": false,
        "title": "Homepage URL",
        "description": "Enter the URL of the homepage you want to improve",
        "default": "https://agpt.co"
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "Improved Webpage Copy": {
        "advanced": false,
        "secret": false,
        "title": "Improved Webpage Copy"
      },
      "Original Page Analysis": {
        "advanced": false,
        "secret": false,
        "title": "Original Page Analysis",
        "description": "Analysis of the webpage as it currently stands."
      }
    },
    "required": [
      "Improved Webpage Copy",
      "Original Page Analysis"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "jina_api_key_credentials": {
        "credentials_provider": [
          "jina"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "jina",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
        "type": "object",
        "discriminator_values": []
      },
      "openai_api_key_credentials": {
        "credentials_provider": [
          "openai"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "openai",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "gpt-4o"
        ]
      }
    },
    "required": [
      "jina_api_key_credentials",
      "openai_api_key_credentials"
    ],
    "title": "AIWebpageCopyImproverCredentialsInputSchema",
    "type": "object"
  }
}
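
Each Agent Input block above reappears as a property of the graph's input_schema, with the block's value as the property default. A small sketch of that mapping as it appears in these fixtures (an illustration inferred from the data, not the platform's schema-generator code):

def input_block_to_property(input_default: dict) -> tuple[str, dict]:
    # Mirror the fixture pattern: block name -> property name/title,
    # block value -> schema default, description carried over verbatim.
    name = input_default["name"]
    prop: dict = {"advanced": False, "secret": False, "title": name}
    if "description" in input_default:
        prop["description"] = input_default["description"]
    if "value" in input_default:
        prop["default"] = input_default["value"]
    return name, prop

name, prop = input_block_to_property({
    "name": "Homepage URL",
    "value": "https://agpt.co",
    "description": "Enter the URL of the homepage you want to improve",
})
assert prop["default"] == "https://agpt.co"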
@@ -0,0 +1,615 @@
{
  "id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
  "version": 29,
  "is_active": true,
  "name": "Email Address Finder",
  "description": "Input information of a business and find their email address",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "04cad535-9f1a-4876-8b07-af5897d8c282",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Address",
        "value": "USA"
      },
      "metadata": {
        "position": {
          "x": 1047.9357219838776,
          "y": 1067.9123910370954
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
          "source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_ADDRESS",
          "is_static": true
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "block_id": "3146e4fe-2cdd-4f29-bd12-0c9d5bb4deb0",
      "input_default": {
        "group": 1,
        "pattern": "<email>(.*?)<\\/email>"
      },
      "metadata": {
        "position": {
          "x": 3381.2821481740634,
          "y": 246.091098184158
        }
      },
      "input_links": [
        {
          "id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
          "source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "source_name": "response",
          "sink_name": "text",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "b15b5143-27b7-486e-a166-4095e72e5235",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "source_name": "negative",
          "sink_name": "values_#_Result",
          "is_static": false
        },
        {
          "id": "23591872-3c6b-4562-87d3-5b6ade698e48",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "positive",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Email"
      },
      "metadata": {
        "position": {
          "x": 4525.4246310882,
          "y": 246.36913665010354
        }
      },
      "input_links": [
        {
          "id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
          "source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "output",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "23591872-3c6b-4562-87d3-5b6ade698e48",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "positive",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "4a41df99-ffe2-4c12-b528-632979c9c030",
      "block_id": "87840993-2053-44b7-8da4-187ad4ee518c",
      "input_default": {},
      "metadata": {
        "position": {
          "x": 2182.7499999999995,
          "y": 242.00001144409185
        }
      },
      "input_links": [
        {
          "id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
          "source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "source_name": "output",
          "sink_name": "query",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
          "source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "results",
          "sink_name": "prompt_values_#_WEBSITE_CONTENT",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Business Name",
        "value": "Tim Cook"
      },
      "metadata": {
        "position": {
          "x": 1049.9704155272595,
          "y": 244.49931152418344
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "946b522c-365f-4ee0-96f9-28863d9882ea",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_NAME",
          "is_static": true
        },
        {
          "id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "result",
          "sink_name": "prompt_values_#_BUSINESS_NAME",
          "is_static": true
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
      "input_default": {
        "format": "Email Address of {{NAME}}, {{ADDRESS}}",
        "values": {}
      },
      "metadata": {
        "position": {
          "x": 1625.25,
          "y": 243.25001144409185
        }
      },
      "input_links": [
        {
          "id": "946b522c-365f-4ee0-96f9-28863d9882ea",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_NAME",
          "is_static": true
        },
        {
          "id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
          "source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
          "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "source_name": "result",
          "sink_name": "values_#_ADDRESS",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
          "source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
          "sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "source_name": "output",
          "sink_name": "query",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "266b7255-11c4-4b88-99e2-85db31a2e865",
      "block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
      "input_default": {
        "format": "Failed to find email. \nResult:\n{{RESULT}}",
        "values": {}
      },
      "metadata": {
        "position": {
          "x": 3949.7493830805934,
          "y": 705.209819698647
        }
      },
      "input_links": [
        {
          "id": "b15b5143-27b7-486e-a166-4095e72e5235",
          "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "source_name": "negative",
          "sink_name": "values_#_Result",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
          "source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
          "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
          "source_name": "output",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "claude-sonnet-4-5-20250929",
        "prompt": "<business_website>\n{{WEBSITE_CONTENT}}\n</business_website>\n\nExtract the Contact Email of {{BUSINESS_NAME}}.\n\nIf no email that can be used to contact {{BUSINESS_NAME}} is present, output `N/A`.\nDo not share any emails other than the email for this specific entity.\n\nIf multiple present pick the likely best one.\n\nRespond with the email (or N/A) inside <email></email> tags.\n\nExample Response:\n\n<thoughts_or_comments>\nThere were many emails present, but luckily one was for {{BUSINESS_NAME}} which I have included below.\n</thoughts_or_comments>\n<email>\nexample@email.com\n</email>",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 2774.879259081777,
          "y": 243.3102035752969
        }
      },
      "input_links": [
        {
          "id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
          "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "result",
          "sink_name": "prompt_values_#_BUSINESS_NAME",
          "is_static": true
        },
        {
          "id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
          "source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
          "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "source_name": "results",
          "sink_name": "prompt_values_#_WEBSITE_CONTENT",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
          "source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
          "sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
          "source_name": "response",
          "sink_name": "text",
          "is_static": false
        }
      ],
      "graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
      "source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "source_name": "response",
      "sink_name": "text",
      "is_static": false
    },
    {
      "id": "b15b5143-27b7-486e-a166-4095e72e5235",
      "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
      "source_name": "negative",
      "sink_name": "values_#_Result",
      "is_static": false
    },
    {
      "id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
      "source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
      "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
      "source_name": "output",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "946b522c-365f-4ee0-96f9-28863d9882ea",
      "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
      "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "source_name": "result",
      "sink_name": "values_#_NAME",
      "is_static": true
    },
    {
      "id": "23591872-3c6b-4562-87d3-5b6ade698e48",
      "source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
      "sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
      "source_name": "positive",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
      "source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
      "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "source_name": "result",
      "sink_name": "prompt_values_#_BUSINESS_NAME",
      "is_static": true
    },
    {
      "id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
      "source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
      "source_name": "output",
      "sink_name": "query",
      "is_static": false
    },
    {
      "id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
      "source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
      "sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
      "source_name": "result",
      "sink_name": "values_#_ADDRESS",
      "is_static": true
    },
    {
      "id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
      "source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
      "sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
      "source_name": "results",
      "sink_name": "prompt_values_#_WEBSITE_CONTENT",
      "is_static": false
    }
  ],
  "forked_from_id": null,
  "forked_from_version": null,
  "sub_graphs": [],
  "user_id": "",
  "created_at": "2025-01-03T00:46:30.244Z",
  "input_schema": {
    "type": "object",
    "properties": {
      "Address": {
        "advanced": false,
        "secret": false,
        "title": "Address",
        "default": "USA"
      },
      "Business Name": {
        "advanced": false,
        "secret": false,
        "title": "Business Name",
        "default": "Tim Cook"
      }
    },
    "required": []
  },
  "output_schema": {
    "type": "object",
    "properties": {
      "Email": {
        "advanced": false,
        "secret": false,
        "title": "Email"
      }
    },
    "required": [
      "Email"
    ]
  },
  "has_external_trigger": false,
  "has_human_in_the_loop": false,
  "trigger_setup_info": null,
  "credentials_input_schema": {
    "properties": {
      "jina_api_key_credentials": {
        "credentials_provider": [
          "jina"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "jina",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
        "type": "object",
        "discriminator_values": []
      },
      "anthropic_api_key_credentials": {
        "credentials_provider": [
          "anthropic"
        ],
        "credentials_types": [
          "api_key"
        ],
        "properties": {
          "id": {
            "title": "Id",
            "type": "string"
          },
          "title": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "anthropic",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.ANTHROPIC: 'anthropic'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "claude-sonnet-4-5-20250929"
        ]
      }
    },
    "required": [
      "jina_api_key_credentials",
      "anthropic_api_key_credentials"
    ],
    "title": "EmailAddressFinderCredentialsInputSchema",
    "type": "object"
  }
}
@@ -11,7 +11,7 @@ from backend.data.block import (
    BlockType,
    get_block,
)
from backend.data.execution import ExecutionStatus, NodesInputMasks
from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
from backend.data.model import NodeExecutionStats, SchemaField
from backend.util.json import validate_with_jsonschema
from backend.util.retry import func_retry
@@ -72,9 +72,9 @@ class AgentExecutorBlock(Block):
        input_data: Input,
        *,
        graph_exec_id: str,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:

        from backend.executor import utils as execution_utils

        graph_exec = await execution_utils.add_graph_execution(
@@ -83,8 +83,9 @@ class AgentExecutorBlock(Block):
            user_id=input_data.user_id,
            inputs=input_data.inputs,
            nodes_input_masks=input_data.nodes_input_masks,
            parent_graph_exec_id=graph_exec_id,
            is_sub_graph=True,  # AgentExecutorBlock executions are always sub-graphs
            execution_context=execution_context.model_copy(
                update={"parent_execution_id": graph_exec_id},
            ),
        )

        logger = execution_utils.LogMetadata(

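For context on the `execution_context.model_copy(update=...)` call in the hunk above: Pydantic's `model_copy` returns a copy of the model with only the listed fields overridden, which is how the parent execution ID gets threaded into sub-graph runs without mutating the caller's context. A minimal self-contained sketch; the fields on `ExecutionContextSketch` are illustrative stand-ins, not the platform's actual model:

from pydantic import BaseModel

class ExecutionContextSketch(BaseModel):
    # Hypothetical fields for illustration only
    user_id: str
    parent_execution_id: str | None = None

ctx = ExecutionContextSketch(user_id="u1")
child_ctx = ctx.model_copy(update={"parent_execution_id": "exec-123"})
assert ctx.parent_execution_id is None  # original is untouched
assert child_ctx.parent_execution_id == "exec-123"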
@@ -1,3 +1,4 @@
import asyncio
from enum import Enum
from typing import Literal

@@ -19,11 +20,26 @@ from backend.data.model import (
    SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.file import MediaFileType
from backend.util.file import MediaFileType, store_media_file


class GeminiImageModel(str, Enum):
    NANO_BANANA = "google/nano-banana"
    NANO_BANANA_PRO = "google/nano-banana-pro"


class AspectRatio(str, Enum):
    MATCH_INPUT_IMAGE = "match_input_image"
    ASPECT_1_1 = "1:1"
    ASPECT_2_3 = "2:3"
    ASPECT_3_2 = "3:2"
    ASPECT_3_4 = "3:4"
    ASPECT_4_3 = "4:3"
    ASPECT_4_5 = "4:5"
    ASPECT_5_4 = "5:4"
    ASPECT_9_16 = "9:16"
    ASPECT_16_9 = "16:9"
    ASPECT_21_9 = "21:9"


class OutputFormat(str, Enum):
@@ -68,6 +84,11 @@ class AIImageCustomizerBlock(Block):
            default=[],
            title="Input Images",
        )
        aspect_ratio: AspectRatio = SchemaField(
            description="Aspect ratio of the generated image",
            default=AspectRatio.MATCH_INPUT_IMAGE,
            title="Aspect Ratio",
        )
        output_format: OutputFormat = SchemaField(
            description="Format of the output image",
            default=OutputFormat.PNG,
@@ -91,6 +112,7 @@ class AIImageCustomizerBlock(Block):
                "prompt": "Make the scene more vibrant and colorful",
                "model": GeminiImageModel.NANO_BANANA,
                "images": [],
                "aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
                "output_format": OutputFormat.JPG,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
@@ -115,11 +137,25 @@ class AIImageCustomizerBlock(Block):
        **kwargs,
    ) -> BlockOutput:
        try:
            # Convert local file paths to Data URIs (base64) so Replicate can access them
            processed_images = await asyncio.gather(
                *(
                    store_media_file(
                        graph_exec_id=graph_exec_id,
                        file=img,
                        user_id=user_id,
                        return_content=True,
                    )
                    for img in input_data.images
                )
            )

            result = await self.run_model(
                api_key=credentials.api_key,
                model_name=input_data.model.value,
                prompt=input_data.prompt,
                images=input_data.images,
                images=processed_images,
                aspect_ratio=input_data.aspect_ratio.value,
                output_format=input_data.output_format.value,
            )
            yield "image_url", result
@@ -132,12 +168,14 @@ class AIImageCustomizerBlock(Block):
        model_name: str,
        prompt: str,
        images: list[MediaFileType],
        aspect_ratio: str,
        output_format: str,
    ) -> MediaFileType:
        client = ReplicateClient(api_token=api_key.get_secret_value())

        input_params: dict = {
            "prompt": prompt,
            "aspect_ratio": aspect_ratio,
            "output_format": output_format,
        }

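The `asyncio.gather` fan-out above converts every input image concurrently before calling Replicate. A rough sketch of the underlying idea, turning local files into base64 data URIs; `store_media_file` is the platform's real helper, while `to_data_uri` below is a stand-in written only for illustration, and the file names are hypothetical:

import asyncio
import base64
import mimetypes
from pathlib import Path

async def to_data_uri(path: str) -> str:
    # Stand-in for store_media_file(..., return_content=True)
    mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
    payload = base64.b64encode(Path(path).read_bytes()).decode()
    return f"data:{mime};base64,{payload}"

async def main() -> None:
    images = ["a.png", "b.jpg"]  # hypothetical local files
    data_uris = await asyncio.gather(*(to_data_uri(p) for p in images))
    print([u[:30] for u in data_uris])

# asyncio.run(main())  # requires the example files to exist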
@@ -60,6 +60,14 @@ SIZE_TO_RECRAFT_DIMENSIONS = {
    ImageSize.TALL: "1024x1536",
}

SIZE_TO_NANO_BANANA_RATIO = {
    ImageSize.SQUARE: "1:1",
    ImageSize.LANDSCAPE: "4:3",
    ImageSize.PORTRAIT: "3:4",
    ImageSize.WIDE: "16:9",
    ImageSize.TALL: "9:16",
}


class ImageStyle(str, Enum):
    """
@@ -98,6 +106,7 @@ class ImageGenModel(str, Enum):
    FLUX_ULTRA = "Flux 1.1 Pro Ultra"
    RECRAFT = "Recraft v3"
    SD3_5 = "Stable Diffusion 3.5 Medium"
    NANO_BANANA_PRO = "Nano Banana Pro"


class AIImageGeneratorBlock(Block):
@@ -261,6 +270,20 @@ class AIImageGeneratorBlock(Block):
                )
                return output

            elif input_data.model == ImageGenModel.NANO_BANANA_PRO:
                # Use Nano Banana Pro (Google Gemini 3 Pro Image)
                input_params = {
                    "prompt": modified_prompt,
                    "aspect_ratio": SIZE_TO_NANO_BANANA_RATIO[input_data.size],
                    "resolution": "2K",  # Default to 2K for good quality/cost balance
                    "output_format": "jpg",
                    "safety_filter_level": "block_only_high",  # Most permissive
                }
                output = await self._run_client(
                    credentials, "google/nano-banana-pro", input_params
                )
                return output

        except Exception as e:
            raise RuntimeError(f"Failed to generate image: {str(e)}")


@@ -1371,7 +1371,7 @@ async def create_base(
    if tables:
        params["tables"] = tables

    print(params)
    logger.debug(f"Creating Airtable base with params: {params}")

    response = await Requests().post(
        "https://api.airtable.com/v0/meta/bases",

224
autogpt_platform/backend/backend/blocks/codex.py
Normal file
@@ -0,0 +1,224 @@
from dataclasses import dataclass
from enum import Enum
from typing import Any, Literal

from openai import AsyncOpenAI
from openai.types.responses import Response as OpenAIResponse
from pydantic import SecretStr

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    NodeExecutionStats,
    SchemaField,
)
from backend.integrations.providers import ProviderName


@dataclass
class CodexCallResult:
    """Structured response returned by Codex invocations."""

    response: str
    reasoning: str
    response_id: str


class CodexModel(str, Enum):
    """Codex-capable OpenAI models."""

    GPT5_1_CODEX = "gpt-5.1-codex"


class CodexReasoningEffort(str, Enum):
    """Configuration for the Responses API reasoning effort."""

    NONE = "none"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


CodexCredentials = CredentialsMetaInput[
    Literal[ProviderName.OPENAI], Literal["api_key"]
]

TEST_CREDENTIALS = APIKeyCredentials(
    id="e2fcb203-3f2d-4ad4-a344-8df3bc7db36b",
    provider="openai",
    api_key=SecretStr("mock-openai-api-key"),
    title="Mock OpenAI API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


def CodexCredentialsField() -> CodexCredentials:
    return CredentialsField(
        description="OpenAI API key with access to Codex models (Responses API).",
    )


class CodeGenerationBlock(Block):
    """Block that talks to Codex models via the OpenAI Responses API."""

    class Input(BlockSchemaInput):
        prompt: str = SchemaField(
            description="Primary coding request passed to the Codex model.",
            placeholder="Generate a Python function that reverses a list.",
        )
        system_prompt: str = SchemaField(
            title="System Prompt",
            default=(
                "You are Codex, an elite software engineer. "
                "Favor concise, working code and highlight important caveats."
            ),
            description="Optional instructions injected via the Responses API instructions field.",
            advanced=True,
        )
        model: CodexModel = SchemaField(
            title="Codex Model",
            default=CodexModel.GPT5_1_CODEX,
            description="Codex-optimized model served via the Responses API.",
            advanced=False,
        )
        reasoning_effort: CodexReasoningEffort = SchemaField(
            title="Reasoning Effort",
            default=CodexReasoningEffort.MEDIUM,
            description="Controls the Responses API reasoning budget. Select 'none' to skip reasoning configs.",
            advanced=True,
        )
        max_output_tokens: int | None = SchemaField(
            title="Max Output Tokens",
            default=2048,
            description="Upper bound for generated tokens (hard limit 128,000). Leave blank to let OpenAI decide.",
            advanced=True,
        )
        credentials: CodexCredentials = CodexCredentialsField()

    class Output(BlockSchemaOutput):
        response: str = SchemaField(
            description="Code-focused response returned by the Codex model."
        )
        reasoning: str = SchemaField(
            description="Reasoning summary returned by the model, if available.",
            default="",
        )
        response_id: str = SchemaField(
            description="ID of the Responses API call for auditing/debugging.",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="86a2a099-30df-47b4-b7e4-34ae5f83e0d5",
            description="Generate or refactor code using OpenAI's Codex (Responses API).",
            categories={BlockCategory.AI, BlockCategory.DEVELOPER_TOOLS},
            input_schema=CodeGenerationBlock.Input,
            output_schema=CodeGenerationBlock.Output,
            test_input=[
                {
                    "prompt": "Write a TypeScript function that deduplicates an array.",
                    "credentials": TEST_CREDENTIALS_INPUT,
                }
            ],
            test_output=[
                ("response", str),
                ("reasoning", str),
                ("response_id", str),
            ],
            test_mock={
                "call_codex": lambda *_args, **_kwargs: CodexCallResult(
                    response="function dedupe<T>(items: T[]): T[] { return [...new Set(items)]; }",
                    reasoning="Used Set to remove duplicates in O(n).",
                    response_id="resp_test",
                )
            },
            test_credentials=TEST_CREDENTIALS,
        )
        self.execution_stats = NodeExecutionStats()

    async def call_codex(
        self,
        *,
        credentials: APIKeyCredentials,
        model: CodexModel,
        prompt: str,
        system_prompt: str,
        max_output_tokens: int | None,
        reasoning_effort: CodexReasoningEffort,
    ) -> CodexCallResult:
        """Invoke the OpenAI Responses API."""
        client = AsyncOpenAI(api_key=credentials.api_key.get_secret_value())

        request_payload: dict[str, Any] = {
            "model": model.value,
            "input": prompt,
        }
        if system_prompt:
            request_payload["instructions"] = system_prompt
        if max_output_tokens is not None:
            request_payload["max_output_tokens"] = max_output_tokens
        if reasoning_effort != CodexReasoningEffort.NONE:
            request_payload["reasoning"] = {"effort": reasoning_effort.value}

        response = await client.responses.create(**request_payload)
        if not isinstance(response, OpenAIResponse):
            raise TypeError(f"Expected OpenAIResponse, got {type(response).__name__}")

        # Extract data directly from typed response
        text_output = response.output_text or ""
        reasoning_summary = (
            str(response.reasoning.summary)
            if response.reasoning and response.reasoning.summary
            else ""
        )
        response_id = response.id or ""

        # Update usage stats
        self.execution_stats.input_token_count = (
            response.usage.input_tokens if response.usage else 0
        )
        self.execution_stats.output_token_count = (
            response.usage.output_tokens if response.usage else 0
        )
        self.execution_stats.llm_call_count += 1

        return CodexCallResult(
            response=text_output,
            reasoning=reasoning_summary,
            response_id=response_id,
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **_kwargs,
    ) -> BlockOutput:
        result = await self.call_codex(
            credentials=credentials,
            model=input_data.model,
            prompt=input_data.prompt,
            system_prompt=input_data.system_prompt,
            max_output_tokens=input_data.max_output_tokens,
            reasoning_effort=input_data.reasoning_effort,
        )

        yield "response", result.response
        yield "reasoning", result.reasoning
        yield "response_id", result.response_id
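As a sanity check on the new block above: the request that `call_codex` assembles boils down to a single Responses API call. A minimal sketch using the same payload keys the block builds; the prompt text and token limit here are arbitrary, and error handling is omitted:

import asyncio
from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    response = await client.responses.create(
        model="gpt-5.1-codex",
        input="Write a Python function that reverses a list.",
        instructions="Favor concise, working code.",
        max_output_tokens=2048,
        reasoning={"effort": "medium"},
    )
    print(response.output_text)  # aggregated text output, as used by call_codex

asyncio.run(main())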
@@ -1,8 +1,9 @@
import base64
import io
import mimetypes
from enum import Enum
from pathlib import Path
from typing import Any
from typing import Any, Literal, cast

import discord
from pydantic import SecretStr
@@ -33,6 +34,19 @@ TEST_CREDENTIALS = TEST_BOT_CREDENTIALS
TEST_CREDENTIALS_INPUT = TEST_BOT_CREDENTIALS_INPUT


class ThreadArchiveDuration(str, Enum):
    """Discord thread auto-archive duration options"""

    ONE_HOUR = "60"
    ONE_DAY = "1440"
    THREE_DAYS = "4320"
    ONE_WEEK = "10080"

    def to_minutes(self) -> int:
        """Convert the duration string to minutes for Discord API"""
        return int(self.value)


class ReadDiscordMessagesBlock(Block):
    class Input(BlockSchemaInput):
        credentials: DiscordCredentials = DiscordCredentialsField()
@@ -1166,3 +1180,211 @@ class DiscordChannelInfoBlock(Block):
            raise ValueError(f"Login error occurred: {login_err}")
        except Exception as e:
            raise ValueError(f"An error occurred: {e}")


class CreateDiscordThreadBlock(Block):
    class Input(BlockSchemaInput):
        credentials: DiscordCredentials = DiscordCredentialsField()
        channel_name: str = SchemaField(
            description="Channel ID or channel name to create the thread in"
        )
        server_name: str = SchemaField(
            description="Server name (only needed if using channel name)",
            advanced=True,
            default="",
        )
        thread_name: str = SchemaField(description="The name of the thread to create")
        is_private: bool = SchemaField(
            description="Whether to create a private thread (requires Boost Level 2+) or public thread",
            default=False,
        )
        auto_archive_duration: ThreadArchiveDuration = SchemaField(
            description="Duration before the thread is automatically archived",
            advanced=True,
            default=ThreadArchiveDuration.ONE_WEEK,
        )
        message_content: str = SchemaField(
            description="Optional initial message to send in the thread",
            advanced=True,
            default="",
        )

    class Output(BlockSchemaOutput):
        status: str = SchemaField(description="Operation status")
        thread_id: str = SchemaField(description="ID of the created thread")
        thread_name: str = SchemaField(description="Name of the created thread")

    def __init__(self):
        super().__init__(
            id="e8f3c9a2-7b5d-4f1e-9c6a-3d8e2b4f7a1c",
            input_schema=CreateDiscordThreadBlock.Input,
            output_schema=CreateDiscordThreadBlock.Output,
            description="Creates a new thread in a Discord channel.",
            categories={BlockCategory.SOCIAL},
            test_input={
                "channel_name": "general",
                "thread_name": "Test Thread",
                "is_private": False,
                "auto_archive_duration": ThreadArchiveDuration.ONE_HOUR,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("status", "Thread created successfully"),
                ("thread_id", "123456789012345678"),
                ("thread_name", "Test Thread"),
            ],
            test_mock={
                "create_thread": lambda *args, **kwargs: {
                    "status": "Thread created successfully",
                    "thread_id": "123456789012345678",
                    "thread_name": "Test Thread",
                }
            },
            test_credentials=TEST_CREDENTIALS,
        )

    async def create_thread(
        self,
        token: str,
        channel_name: str,
        server_name: str | None,
        thread_name: str,
        is_private: bool,
        auto_archive_duration: ThreadArchiveDuration,
        message_content: str,
    ) -> dict:
        intents = discord.Intents.default()
        intents.guilds = True
        intents.message_content = True  # Required for sending messages in threads
        client = discord.Client(intents=intents)

        result = {}

        @client.event
        async def on_ready():
            channel = None

            # Try to parse as channel ID first
            try:
                channel_id = int(channel_name)
                try:
                    channel = await client.fetch_channel(channel_id)
                except discord.errors.NotFound:
                    result["status"] = f"Channel with ID {channel_id} not found"
                    await client.close()
                    return
                except discord.errors.Forbidden:
                    result["status"] = (
                        f"Bot does not have permission to view channel {channel_id}"
                    )
                    await client.close()
                    return
            except ValueError:
                # Not an ID, treat as channel name
                # Collect all matching channels to detect duplicates
                matching_channels = []
                for guild in client.guilds:
                    # Skip guilds if server_name is provided and doesn't match
                    if (
                        server_name
                        and server_name.strip()
                        and guild.name != server_name
                    ):
                        continue
                    for ch in guild.text_channels:
                        if ch.name == channel_name:
                            matching_channels.append(ch)

                if not matching_channels:
                    result["status"] = f"Channel not found: {channel_name}"
                    await client.close()
                    return
                elif len(matching_channels) > 1:
                    result["status"] = (
                        f"Multiple channels named '{channel_name}' found. "
                        "Please specify server_name to disambiguate."
                    )
                    await client.close()
                    return
                else:
                    channel = matching_channels[0]

            if not channel:
                result["status"] = "Failed to resolve channel"
                await client.close()
                return

            # Type check - ensure it's a text channel that can create threads
            if not hasattr(channel, "create_thread"):
                result["status"] = (
                    f"Channel {channel_name} cannot create threads (not a text channel)"
                )
                await client.close()
                return

            # After the hasattr check, we know channel is a TextChannel
            channel = cast(discord.TextChannel, channel)

            try:
                # Create the thread using discord.py 2.0+ API
                thread_type = (
                    discord.ChannelType.private_thread
                    if is_private
                    else discord.ChannelType.public_thread
                )

                # Cast to the specific Literal type that discord.py expects
                duration_minutes = cast(
                    Literal[60, 1440, 4320, 10080], auto_archive_duration.to_minutes()
                )

                # The 'type' parameter exists in discord.py 2.0+ but isn't in type stubs yet
                # pyright: ignore[reportCallIssue]
                thread = await channel.create_thread(
                    name=thread_name,
                    type=thread_type,
                    auto_archive_duration=duration_minutes,
                )

                # Send initial message if provided
                if message_content:
                    await thread.send(message_content)

                result["status"] = "Thread created successfully"
                result["thread_id"] = str(thread.id)
                result["thread_name"] = thread.name

            except discord.errors.Forbidden as e:
                result["status"] = (
                    f"Bot does not have permission to create threads in this channel. {str(e)}"
                )
            except Exception as e:
                result["status"] = f"Error creating thread: {str(e)}"
            finally:
                await client.close()

        await client.start(token)
        return result

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        try:
            result = await self.create_thread(
                token=credentials.api_key.get_secret_value(),
                channel_name=input_data.channel_name,
                server_name=input_data.server_name or None,
                thread_name=input_data.thread_name,
                is_private=input_data.is_private,
                auto_archive_duration=input_data.auto_archive_duration,
                message_content=input_data.message_content,
            )

            yield "status", result.get("status", "Unknown error")
            if "thread_id" in result:
                yield "thread_id", result["thread_id"]
            if "thread_name" in result:
                yield "thread_name", result["thread_name"]

        except discord.errors.LoginFailure as login_err:
            raise ValueError(f"Login error occurred: {login_err}")

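One detail worth calling out in `CreateDiscordThreadBlock`: `auto_archive_duration` is stored as a string enum (so it serializes cleanly in schemas) and converted with `to_minutes()`, then cast to the `Literal[60, 1440, 4320, 10080]` type discord.py expects. A quick illustration using the enum defined above:

duration = ThreadArchiveDuration.ONE_DAY
assert duration.value == "1440"       # serialized form used in schemas
assert duration.to_minutes() == 1440  # integer form passed to discord.py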
@@ -1,4 +1,6 @@
import smtplib
import socket
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Literal
@@ -48,9 +50,7 @@ def SMTPCredentialsField() -> SMTPCredentialsInput:


class SMTPConfig(BaseModel):
    smtp_server: str = SchemaField(
        default="smtp.example.com", description="SMTP server address"
    )
    smtp_server: str = SchemaField(description="SMTP server address")
    smtp_port: int = SchemaField(default=25, description="SMTP port number")

    model_config = ConfigDict(title="SMTP Config")
@@ -67,10 +67,7 @@ class SendEmailBlock(Block):
    body: str = SchemaField(
        description="Body of the email", placeholder="Enter the email body"
    )
    config: SMTPConfig = SchemaField(
        description="SMTP Config",
        default=SMTPConfig(),
    )
    config: SMTPConfig = SchemaField(description="SMTP Config")
    credentials: SMTPCredentialsInput = SMTPCredentialsField()

    class Output(BlockSchemaOutput):
@@ -120,7 +117,7 @@ class SendEmailBlock(Block):
        msg["Subject"] = subject
        msg.attach(MIMEText(body, "plain"))

        with smtplib.SMTP(smtp_server, smtp_port) as server:
        with smtplib.SMTP(smtp_server, smtp_port, timeout=30) as server:
            server.starttls()
            server.login(smtp_username, smtp_password)
            server.sendmail(smtp_username, to_email, msg.as_string())
@@ -130,10 +127,59 @@ class SendEmailBlock(Block):
    async def run(
        self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
    ) -> BlockOutput:
        yield "status", self.send_email(
            config=input_data.config,
            to_email=input_data.to_email,
            subject=input_data.subject,
            body=input_data.body,
            credentials=credentials,
        )
        try:
            status = self.send_email(
                config=input_data.config,
                to_email=input_data.to_email,
                subject=input_data.subject,
                body=input_data.body,
                credentials=credentials,
            )
            yield "status", status
        except socket.gaierror:
            yield "error", (
                f"Cannot connect to SMTP server '{input_data.config.smtp_server}'. "
                "Please verify the server address is correct."
            )
        except socket.timeout:
            yield "error", (
                f"Connection timeout to '{input_data.config.smtp_server}' "
                f"on port {input_data.config.smtp_port}. "
                "The server may be down or unreachable."
            )
        except ConnectionRefusedError:
            yield "error", (
                f"Connection refused to '{input_data.config.smtp_server}' "
                f"on port {input_data.config.smtp_port}. "
                "Common SMTP ports are: 587 (TLS), 465 (SSL), 25 (plain). "
                "Please verify the port is correct."
            )
        except smtplib.SMTPNotSupportedError:
            yield "error", (
                f"STARTTLS not supported by server '{input_data.config.smtp_server}'. "
                "Try using port 465 for SSL or port 25 for unencrypted connection."
            )
        except ssl.SSLError as e:
            yield "error", (
                f"SSL/TLS error when connecting to '{input_data.config.smtp_server}': {str(e)}. "
                "The server may require a different security protocol."
            )
        except smtplib.SMTPAuthenticationError:
            yield "error", (
                "Authentication failed. Please verify your username and password are correct."
            )
        except smtplib.SMTPRecipientsRefused:
            yield "error", (
                f"Recipient email address '{input_data.to_email}' was rejected by the server. "
                "Please verify the email address is valid."
            )
        except smtplib.SMTPSenderRefused:
            yield "error", (
                "Sender email address defined in the credentials that were used "
                "was rejected by the server. "
                "Please verify your account is authorized to send emails."
            )
        except smtplib.SMTPDataError as e:
            yield "error", f"Email data rejected by server: {str(e)}"
        except Exception as e:
            raise e

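The rewritten `run` above maps each transport failure to a user-facing message instead of letting raw exceptions bubble up. The happy path it guards reduces to the standard `smtplib` STARTTLS flow; a minimal sketch, where the server, credentials, and addresses are placeholders rather than values from the block:

import smtplib
from email.mime.text import MIMEText

msg = MIMEText("Hello from the sketch", "plain")
msg["Subject"] = "Test"

# Placeholders; a real call would use the SMTPConfig and SMTPCredentials values
with smtplib.SMTP("smtp.example.com", 587, timeout=30) as server:
    server.starttls()  # may raise SMTPNotSupportedError
    server.login("user@example.com", "app-password")  # may raise SMTPAuthenticationError
    server.sendmail("user@example.com", "to@example.com", msg.as_string())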
@@ -0,0 +1,108 @@
{
  "action": "created",
  "discussion": {
    "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "category": {
      "id": 12345678,
      "node_id": "DIC_kwDOJKSTjM4CXXXX",
      "repository_id": 614765452,
      "emoji": ":pray:",
      "name": "Q&A",
      "description": "Ask the community for help",
      "created_at": "2023-03-16T09:21:07Z",
      "updated_at": "2023-03-16T09:21:07Z",
      "slug": "q-a",
      "is_answerable": true
    },
    "answer_html_url": null,
    "answer_chosen_at": null,
    "answer_chosen_by": null,
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT/discussions/9999",
    "id": 5000000001,
    "node_id": "D_kwDOJKSTjM4AYYYY",
    "number": 9999,
    "title": "How do I configure custom blocks?",
    "user": {
      "login": "curious-user",
      "id": 22222222,
      "node_id": "MDQ6VXNlcjIyMjIyMjIy",
      "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4",
      "url": "https://api.github.com/users/curious-user",
      "html_url": "https://github.com/curious-user",
      "type": "User",
      "site_admin": false
    },
    "state": "open",
    "state_reason": null,
    "locked": false,
    "comments": 0,
    "created_at": "2024-12-01T17:00:00Z",
    "updated_at": "2024-12-01T17:00:00Z",
    "author_association": "NONE",
    "active_lock_reason": null,
    "body": "## Question\n\nI'm trying to create a custom block for my specific use case. I've read the documentation but I'm not sure how to:\n\n1. Define the input/output schema\n2. Handle authentication\n3. Test my block locally\n\nCan someone point me to examples or provide guidance?\n\n## Environment\n\n- AutoGPT Platform version: latest\n- Python: 3.11",
    "reactions": {
      "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/reactions",
      "total_count": 0,
      "+1": 0,
      "-1": 0,
      "laugh": 0,
      "hooray": 0,
      "confused": 0,
      "heart": 0,
      "rocket": 0,
      "eyes": 0
    },
    "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/timeline"
  },
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T17:00:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170000,
    "watchers_count": 170000,
    "language": "Python",
    "has_discussions": true,
    "forks_count": 45000,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "curious-user",
    "id": 22222222,
    "node_id": "MDQ6VXNlcjIyMjIyMjIy",
    "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/curious-user",
    "html_url": "https://github.com/curious-user",
    "type": "User",
    "site_admin": false
  }
}
@@ -0,0 +1,112 @@
{
  "action": "opened",
  "issue": {
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345",
    "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/labels{/name}",
    "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/comments",
    "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/events",
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT/issues/12345",
    "id": 2000000001,
    "node_id": "I_kwDOJKSTjM5wXXXX",
    "number": 12345,
    "title": "Bug: Application crashes when processing large files",
    "user": {
      "login": "bug-reporter",
      "id": 11111111,
      "node_id": "MDQ6VXNlcjExMTExMTEx",
      "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4",
      "url": "https://api.github.com/users/bug-reporter",
      "html_url": "https://github.com/bug-reporter",
      "type": "User",
      "site_admin": false
    },
    "labels": [
      {
        "id": 5272676214,
        "node_id": "LA_kwDOJKSTjM8AAAABOkandg",
        "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/bug",
        "name": "bug",
        "color": "d73a4a",
        "default": true,
        "description": "Something isn't working"
      }
    ],
    "state": "open",
    "locked": false,
    "assignee": null,
    "assignees": [],
    "milestone": null,
    "comments": 0,
    "created_at": "2024-12-01T16:00:00Z",
    "updated_at": "2024-12-01T16:00:00Z",
    "closed_at": null,
    "author_association": "NONE",
    "active_lock_reason": null,
    "body": "## Description\n\nWhen I try to process a file larger than 100MB, the application crashes with an out of memory error.\n\n## Steps to Reproduce\n\n1. Open the application\n2. Select a file larger than 100MB\n3. Click 'Process'\n4. Application crashes\n\n## Expected Behavior\n\nThe application should handle large files gracefully.\n\n## Environment\n\n- OS: Ubuntu 22.04\n- Python: 3.11\n- AutoGPT Version: 1.0.0",
    "reactions": {
      "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/reactions",
      "total_count": 0,
      "+1": 0,
      "-1": 0,
      "laugh": 0,
      "hooray": 0,
      "confused": 0,
      "heart": 0,
      "rocket": 0,
      "eyes": 0
    },
    "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/timeline",
    "state_reason": null
  },
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T16:00:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170000,
    "watchers_count": 170000,
    "language": "Python",
    "forks_count": 45000,
    "open_issues_count": 190,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "bug-reporter",
    "id": 11111111,
    "node_id": "MDQ6VXNlcjExMTExMTEx",
    "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/bug-reporter",
    "html_url": "https://github.com/bug-reporter",
    "type": "User",
    "site_admin": false
  }
}
@@ -0,0 +1,97 @@
{
  "action": "published",
  "release": {
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789",
    "assets_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets",
    "upload_url": "https://uploads.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets{?name,label}",
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/tag/v1.0.0",
    "id": 123456789,
    "author": {
      "login": "ntindle",
      "id": 12345678,
      "node_id": "MDQ6VXNlcjEyMzQ1Njc4",
      "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4",
      "gravatar_id": "",
      "url": "https://api.github.com/users/ntindle",
      "html_url": "https://github.com/ntindle",
      "type": "User",
      "site_admin": false
    },
    "node_id": "RE_kwDOJKSTjM4HWwAA",
    "tag_name": "v1.0.0",
    "target_commitish": "master",
    "name": "AutoGPT Platform v1.0.0",
    "draft": false,
    "prerelease": false,
    "created_at": "2024-12-01T10:00:00Z",
    "published_at": "2024-12-01T12:00:00Z",
    "assets": [
      {
        "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/assets/987654321",
        "id": 987654321,
        "node_id": "RA_kwDOJKSTjM4HWwBB",
        "name": "autogpt-v1.0.0.zip",
        "label": "Release Package",
        "content_type": "application/zip",
        "state": "uploaded",
        "size": 52428800,
        "download_count": 0,
        "created_at": "2024-12-01T11:30:00Z",
        "updated_at": "2024-12-01T11:35:00Z",
        "browser_download_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/download/v1.0.0/autogpt-v1.0.0.zip"
      }
    ],
    "tarball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tarball/v1.0.0",
    "zipball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/zipball/v1.0.0",
    "body": "## What's New\n\n- Feature 1: Amazing new capability\n- Feature 2: Performance improvements\n- Bug fixes and stability improvements\n\n## Breaking Changes\n\nNone\n\n## Contributors\n\nThanks to all our contributors!"
  },
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T12:00:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170000,
    "watchers_count": 170000,
    "language": "Python",
    "forks_count": 45000,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "ntindle",
    "id": 12345678,
    "node_id": "MDQ6VXNlcjEyMzQ1Njc4",
    "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/ntindle",
    "html_url": "https://github.com/ntindle",
    "type": "User",
    "site_admin": false
  }
}
@@ -0,0 +1,53 @@
{
  "action": "created",
  "starred_at": "2024-12-01T15:30:00Z",
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T15:30:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170001,
    "watchers_count": 170001,
    "language": "Python",
    "forks_count": 45000,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "awesome-contributor",
    "id": 98765432,
    "node_id": "MDQ6VXNlcjk4NzY1NDMy",
    "avatar_url": "https://avatars.githubusercontent.com/u/98765432?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/awesome-contributor",
    "html_url": "https://github.com/awesome-contributor",
    "type": "User",
    "site_admin": false
  }
}
@@ -159,3 +159,391 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):


# --8<-- [end:GithubTriggerExample]


class GithubStarTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub star events - useful for milestone celebrations."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "star.created.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#star
            """

            created: bool = False
            deleted: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The star events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The star event that triggered the webhook ('created' or 'deleted')"
        )
        starred_at: str = SchemaField(
            description="ISO timestamp when the repo was starred (empty if deleted)"
        )
        stargazers_count: int = SchemaField(
            description="Current number of stars on the repository"
        )
        repository_name: str = SchemaField(
            description="Full name of the repository (owner/repo)"
        )
        repository_url: str = SchemaField(description="URL to the repository")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="551e0a35-100b-49b7-89b8-3031322239b6",
            description="This block triggers on GitHub star events. "
            "Useful for celebrating milestones (e.g., 1k, 10k stars) or tracking engagement.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubStarTriggerBlock.Input,
            output_schema=GithubStarTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="star.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"created": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("starred_at", example_payload.get("starred_at", "")),
                ("stargazers_count", example_payload["repository"]["stargazers_count"]),
                ("repository_name", example_payload["repository"]["full_name"]),
                ("repository_url", example_payload["repository"]["html_url"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        yield "event", input_data.payload["action"]
        yield "starred_at", input_data.payload.get("starred_at", "")
        yield "stargazers_count", input_data.payload["repository"]["stargazers_count"]
        yield "repository_name", input_data.payload["repository"]["full_name"]
        yield "repository_url", input_data.payload["repository"]["html_url"]


class GithubReleaseTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub release events - ideal for announcing new versions."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "release.published.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#release
            """

            published: bool = False
            unpublished: bool = False
            created: bool = False
            edited: bool = False
            deleted: bool = False
            prereleased: bool = False
            released: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The release events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The release event that triggered the webhook (e.g., 'published')"
        )
        release: dict = SchemaField(description="The full release object")
        release_url: str = SchemaField(description="URL to the release page")
        tag_name: str = SchemaField(description="The release tag name (e.g., 'v1.0.0')")
        release_name: str = SchemaField(description="Human-readable release name")
        body: str = SchemaField(description="Release notes/description")
        prerelease: bool = SchemaField(description="Whether this is a prerelease")
        draft: bool = SchemaField(description="Whether this is a draft release")
        assets: list = SchemaField(description="List of release assets/files")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="2052dd1b-74e1-46ac-9c87-c7a0e057b60b",
            description="This block triggers on GitHub release events. "
            "Perfect for automating announcements to Discord, Twitter, or other platforms.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubReleaseTriggerBlock.Input,
            output_schema=GithubReleaseTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="release.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"published": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("release", example_payload["release"]),
                ("release_url", example_payload["release"]["html_url"]),
                ("tag_name", example_payload["release"]["tag_name"]),
                ("release_name", example_payload["release"]["name"]),
                ("body", example_payload["release"]["body"]),
                ("prerelease", example_payload["release"]["prerelease"]),
                ("draft", example_payload["release"]["draft"]),
                ("assets", example_payload["release"]["assets"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        release = input_data.payload["release"]
        yield "event", input_data.payload["action"]
        yield "release", release
        yield "release_url", release["html_url"]
        yield "tag_name", release["tag_name"]
        yield "release_name", release.get("name", "")
        yield "body", release.get("body", "")
        yield "prerelease", release["prerelease"]
        yield "draft", release["draft"]
        yield "assets", release["assets"]


class GithubIssuesTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub issues events - great for triage and notifications."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "issues.opened.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#issues
            """

            opened: bool = False
            edited: bool = False
            deleted: bool = False
            closed: bool = False
            reopened: bool = False
            assigned: bool = False
            unassigned: bool = False
            labeled: bool = False
            unlabeled: bool = False
            locked: bool = False
            unlocked: bool = False
            transferred: bool = False
            milestoned: bool = False
            demilestoned: bool = False
            pinned: bool = False
            unpinned: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The issue events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The issue event that triggered the webhook (e.g., 'opened')"
        )
        number: int = SchemaField(description="The issue number")
        issue: dict = SchemaField(description="The full issue object")
        issue_url: str = SchemaField(description="URL to the issue")
        issue_title: str = SchemaField(description="The issue title")
        issue_body: str = SchemaField(description="The issue body/description")
        labels: list = SchemaField(description="List of labels on the issue")
        assignees: list = SchemaField(description="List of assignees")
        state: str = SchemaField(description="Issue state ('open' or 'closed')")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="b2605464-e486-4bf4-aad3-d8a213c8a48a",
            description="This block triggers on GitHub issues events. "
            "Useful for automated triage, notifications, and welcoming first-time contributors.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubIssuesTriggerBlock.Input,
            output_schema=GithubIssuesTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="issues.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"opened": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("number", example_payload["issue"]["number"]),
                ("issue", example_payload["issue"]),
                ("issue_url", example_payload["issue"]["html_url"]),
                ("issue_title", example_payload["issue"]["title"]),
                ("issue_body", example_payload["issue"]["body"]),
                ("labels", example_payload["issue"]["labels"]),
                ("assignees", example_payload["issue"]["assignees"]),
                ("state", example_payload["issue"]["state"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        issue = input_data.payload["issue"]
        yield "event", input_data.payload["action"]
        yield "number", issue["number"]
        yield "issue", issue
        yield "issue_url", issue["html_url"]
        yield "issue_title", issue["title"]
        yield "issue_body", issue.get("body") or ""
        yield "labels", issue["labels"]
        yield "assignees", issue["assignees"]
        yield "state", issue["state"]


class GithubDiscussionTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub discussion events - perfect for community Q&A sync."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "discussion.created.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#discussion
            """

            created: bool = False
            edited: bool = False
            deleted: bool = False
            answered: bool = False
            unanswered: bool = False
            labeled: bool = False
            unlabeled: bool = False
            locked: bool = False
            unlocked: bool = False
            category_changed: bool = False
            transferred: bool = False
            pinned: bool = False
            unpinned: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The discussion events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The discussion event that triggered the webhook"
        )
        number: int = SchemaField(description="The discussion number")
        discussion: dict = SchemaField(description="The full discussion object")
        discussion_url: str = SchemaField(description="URL to the discussion")
        title: str = SchemaField(description="The discussion title")
        body: str = SchemaField(description="The discussion body")
        category: dict = SchemaField(description="The discussion category object")
        category_name: str = SchemaField(description="Name of the category")
        state: str = SchemaField(description="Discussion state")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="87f847b3-d81a-424e-8e89-acadb5c9d52b",
            description="This block triggers on GitHub Discussions events. "
            "Great for syncing Q&A to Discord or auto-responding to common questions. "
            "Note: Discussions must be enabled on the repository.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubDiscussionTriggerBlock.Input,
            output_schema=GithubDiscussionTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="discussion.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"created": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("number", example_payload["discussion"]["number"]),
                ("discussion", example_payload["discussion"]),
                ("discussion_url", example_payload["discussion"]["html_url"]),
                ("title", example_payload["discussion"]["title"]),
                ("body", example_payload["discussion"]["body"]),
                ("category", example_payload["discussion"]["category"]),
                ("category_name", example_payload["discussion"]["category"]["name"]),
                ("state", example_payload["discussion"]["state"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        discussion = input_data.payload["discussion"]
        yield "event", input_data.payload["action"]
        yield "number", discussion["number"]
        yield "discussion", discussion
        yield "discussion_url", discussion["html_url"]
        yield "title", discussion["title"]
        yield "body", discussion.get("body") or ""
        yield "category", discussion["category"]
        yield "category_name", discussion["category"]["name"]
        yield "state", discussion["state"]

155
autogpt_platform/backend/backend/blocks/google/_drive.py
Normal file
155
autogpt_platform/backend/backend/blocks/google/_drive.py
Normal file
@@ -0,0 +1,155 @@
|
||||
from typing import Any, Literal, Optional
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
AttachmentView = Literal[
|
||||
"DOCS",
|
||||
"DOCUMENTS",
|
||||
"SPREADSHEETS",
|
||||
"PRESENTATIONS",
|
||||
"DOCS_IMAGES",
|
||||
"FOLDERS",
|
||||
]
|
||||
ATTACHMENT_VIEWS: tuple[AttachmentView, ...] = (
|
||||
"DOCS",
|
||||
"DOCUMENTS",
|
||||
"SPREADSHEETS",
|
||||
"PRESENTATIONS",
|
||||
"DOCS_IMAGES",
|
||||
"FOLDERS",
|
||||
)
|
||||
|
||||
|
||||
class _GoogleDriveFileBase(BaseModel):
|
||||
"""Internal base class for Google Drive file representation."""
|
||||
|
||||
model_config = ConfigDict(populate_by_name=True)
|
||||
|
||||
id: str = Field(description="Google Drive file/folder ID")
|
||||
name: Optional[str] = Field(None, description="File/folder name")
|
||||
mime_type: Optional[str] = Field(
|
||||
None,
|
||||
alias="mimeType",
|
||||
description="MIME type (e.g., application/vnd.google-apps.document)",
|
||||
)
|
||||
url: Optional[str] = Field(None, description="URL to open the file")
|
||||
icon_url: Optional[str] = Field(None, alias="iconUrl", description="Icon URL")
|
||||
is_folder: Optional[bool] = Field(
|
||||
None, alias="isFolder", description="Whether this is a folder"
|
||||
)
|
||||
|
||||
|
||||
class GoogleDriveFile(_GoogleDriveFileBase):
|
||||
"""
|
||||
Represents a Google Drive file/folder with optional credentials for chaining.
|
||||
|
||||
Used for both inputs and outputs in Google Drive blocks. The `_credentials_id`
|
||||
field enables chaining between blocks - when one block outputs a file, the
|
||||
next block can use the same credentials to access it.
|
||||
|
||||
When used with GoogleDriveFileField(), the frontend renders a combined
|
||||
auth + file picker UI that automatically populates `_credentials_id`.
|
||||
"""
|
||||
|
||||
# Hidden field for credential ID - populated by frontend, preserved in outputs
|
||||
credentials_id: Optional[str] = Field(
|
||||
None,
|
||||
alias="_credentials_id",
|
||||
description="Internal: credential ID for authentication",
|
||||
)
|
||||
|
||||
|
||||
def GoogleDriveFileField(
|
||||
*,
|
||||
title: str,
|
||||
description: str | None = None,
|
||||
credentials_kwarg: str = "credentials",
|
||||
credentials_scopes: list[str] | None = None,
|
||||
allowed_views: list[AttachmentView] | None = None,
|
||||
allowed_mime_types: list[str] | None = None,
|
||||
placeholder: str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""
|
||||
Creates a Google Drive file input field with auto-generated credentials.
|
||||
|
||||
This field type produces a single UI element that handles both:
|
||||
1. Google OAuth authentication
|
||||
2. File selection via Google Drive Picker
|
||||
|
||||
The system automatically generates a credentials field, and the credentials
|
||||
are passed to the run() method using the specified kwarg name.
|
||||
|
||||
Args:
|
||||
title: Field title shown in UI
|
||||
description: Field description/help text
|
||||
credentials_kwarg: Name of the kwarg that will receive GoogleCredentials
|
||||
in the run() method (default: "credentials")
|
||||
credentials_scopes: OAuth scopes required (default: drive.file)
|
||||
allowed_views: List of view types to show in picker (default: ["DOCS"])
|
||||
allowed_mime_types: Filter by MIME types
|
||||
placeholder: Placeholder text for the button
|
||||
**kwargs: Additional SchemaField arguments
|
||||
|
||||
Returns:
|
||||
Field definition that produces GoogleDriveFile
|
||||
|
||||
Example:
|
||||
>>> class MyBlock(Block):
|
||||
... class Input(BlockSchemaInput):
|
||||
... spreadsheet: GoogleDriveFile = GoogleDriveFileField(
|
||||
... title="Select Spreadsheet",
|
||||
... credentials_kwarg="creds",
|
||||
... allowed_views=["SPREADSHEETS"],
|
||||
... )
|
||||
...
|
||||
... async def run(
|
||||
... self, input_data: Input, *, creds: GoogleCredentials, **kwargs
|
||||
... ):
|
||||
... # creds is automatically populated
|
||||
... file = input_data.spreadsheet
|
||||
"""
|
||||
|
||||
# Determine scopes - drive.file is sufficient for picker-selected files
|
||||
scopes = credentials_scopes or ["https://www.googleapis.com/auth/drive.file"]
|
||||
|
||||
# Build picker configuration with auto_credentials embedded
|
||||
picker_config = {
|
||||
"multiselect": False,
|
||||
"allow_folder_selection": False,
|
||||
"allowed_views": list(allowed_views) if allowed_views else ["DOCS"],
|
||||
"scopes": scopes,
|
||||
# Auto-credentials config tells frontend to include _credentials_id in output
|
||||
"auto_credentials": {
|
||||
"provider": "google",
|
||||
"type": "oauth2",
|
||||
"scopes": scopes,
|
||||
"kwarg_name": credentials_kwarg,
|
||||
},
|
||||
}
|
||||
|
||||
if allowed_mime_types:
|
||||
picker_config["allowed_mime_types"] = list(allowed_mime_types)
|
||||
|
||||
return SchemaField(
|
||||
default=None,
|
||||
title=title,
|
||||
description=description,
|
||||
placeholder=placeholder or "Select from Google Drive",
|
||||
# Use google-drive-picker format so frontend renders existing component
|
||||
format="google-drive-picker",
|
||||
advanced=False,
|
||||
json_schema_extra={
|
||||
"google_drive_picker_config": picker_config,
|
||||
# Also keep auto_credentials at top level for backend detection
|
||||
"auto_credentials": {
|
||||
"provider": "google",
|
||||
"type": "oauth2",
|
||||
"scopes": scopes,
|
||||
"kwarg_name": credentials_kwarg,
|
||||
},
|
||||
**kwargs,
|
||||
},
|
||||
)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -184,7 +184,13 @@ class SendWebRequestBlock(Block):
|
||||
)
|
||||
|
||||
# ─── Execute request ─────────────────────────────────────────
|
||||
response = await Requests().request(
|
||||
# Use raise_for_status=False so HTTP errors (4xx, 5xx) are returned
|
||||
# as response objects instead of raising exceptions, allowing proper
|
||||
# handling via client_error and server_error outputs
|
||||
response = await Requests(
|
||||
raise_for_status=False,
|
||||
retry_max_attempts=1, # allow callers to handle HTTP errors immediately
|
||||
).request(
|
||||
input_data.method.value,
|
||||
input_data.url,
|
||||
headers=input_data.headers,
|
||||
|
||||
166
autogpt_platform/backend/backend/blocks/human_in_the_loop.py
Normal file
166
autogpt_platform/backend/backend/blocks/human_in_the_loop.py
Normal file
@@ -0,0 +1,166 @@
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from prisma.enums import ReviewStatus
|
||||
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
BlockType,
|
||||
)
|
||||
from backend.data.execution import ExecutionContext, ExecutionStatus
|
||||
from backend.data.human_review import ReviewResult
|
||||
from backend.data.model import SchemaField
|
||||
from backend.executor.manager import async_update_node_execution_status
|
||||
from backend.util.clients import get_database_manager_async_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HumanInTheLoopBlock(Block):
|
||||
"""
|
||||
This block pauses execution and waits for human approval or modification of the data.
|
||||
|
||||
When executed, it creates a pending review entry and sets the node execution status
|
||||
to REVIEW. The execution will remain paused until a human user either:
|
||||
- Approves the data (with or without modifications)
|
||||
- Rejects the data
|
||||
|
||||
This is useful for workflows that require human validation or intervention before
|
||||
proceeding to the next steps.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
data: Any = SchemaField(description="The data to be reviewed by a human user")
|
||||
name: str = SchemaField(
|
||||
description="A descriptive name for what this data represents",
|
||||
)
|
||||
editable: bool = SchemaField(
|
||||
description="Whether the human reviewer can edit the data",
|
||||
default=True,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
approved_data: Any = SchemaField(
|
||||
description="The data when approved (may be modified by reviewer)"
|
||||
)
|
||||
rejected_data: Any = SchemaField(
|
||||
description="The data when rejected (may be modified by reviewer)"
|
||||
)
|
||||
review_message: str = SchemaField(
|
||||
description="Any message provided by the reviewer", default=""
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
|
||||
description="Pause execution and wait for human approval or modification of data",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=HumanInTheLoopBlock.Input,
|
||||
output_schema=HumanInTheLoopBlock.Output,
|
||||
block_type=BlockType.HUMAN_IN_THE_LOOP,
|
||||
test_input={
|
||||
"data": {"name": "John Doe", "age": 30},
|
||||
"name": "User profile data",
|
||||
"editable": True,
|
||||
},
|
||||
test_output=[
|
||||
("approved_data", {"name": "John Doe", "age": 30}),
|
||||
],
|
||||
test_mock={
|
||||
"get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
|
||||
data={"name": "John Doe", "age": 30},
|
||||
status=ReviewStatus.APPROVED,
|
||||
message="",
|
||||
processed=False,
|
||||
node_exec_id="test-node-exec-id",
|
||||
),
|
||||
"update_node_execution_status": lambda *_args, **_kwargs: None,
|
||||
"update_review_processed_status": lambda *_args, **_kwargs: None,
|
||||
},
|
||||
)
|
||||
|
||||
async def get_or_create_human_review(self, **kwargs):
|
||||
return await get_database_manager_async_client().get_or_create_human_review(
|
||||
**kwargs
|
||||
)
|
||||
|
||||
async def update_node_execution_status(self, **kwargs):
|
||||
return await async_update_node_execution_status(
|
||||
db_client=get_database_manager_async_client(), **kwargs
|
||||
)
|
||||
|
||||
async def update_review_processed_status(self, node_exec_id: str, processed: bool):
|
||||
return await get_database_manager_async_client().update_review_processed_status(
|
||||
node_exec_id, processed
|
||||
)
|
||||
|
||||
async def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
user_id: str,
|
||||
node_exec_id: str,
|
||||
graph_exec_id: str,
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
execution_context: ExecutionContext,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
if not execution_context.safe_mode:
|
||||
logger.info(
|
||||
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
|
||||
)
|
||||
yield "approved_data", input_data.data
|
||||
yield "review_message", "Auto-approved (safe mode disabled)"
|
||||
return
|
||||
|
||||
try:
|
||||
result = await self.get_or_create_human_review(
|
||||
user_id=user_id,
|
||||
node_exec_id=node_exec_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
graph_id=graph_id,
|
||||
graph_version=graph_version,
|
||||
input_data=input_data.data,
|
||||
message=input_data.name,
|
||||
editable=input_data.editable,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error in HITL block for node {node_exec_id}: {str(e)}")
|
||||
raise
|
||||
|
||||
if result is None:
|
||||
logger.info(
|
||||
f"HITL block pausing execution for node {node_exec_id} - awaiting human review"
|
||||
)
|
||||
try:
|
||||
await self.update_node_execution_status(
|
||||
exec_id=node_exec_id,
|
||||
status=ExecutionStatus.REVIEW,
|
||||
)
|
||||
return
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to update node status for HITL block {node_exec_id}: {str(e)}"
|
||||
)
|
||||
raise
|
||||
|
||||
if not result.processed:
|
||||
await self.update_review_processed_status(
|
||||
node_exec_id=node_exec_id, processed=True
|
||||
)
|
||||
|
||||
if result.status == ReviewStatus.APPROVED:
|
||||
yield "approved_data", result.data
|
||||
if result.message:
|
||||
yield "review_message", result.message
|
||||
|
||||
elif result.status == ReviewStatus.REJECTED:
|
||||
yield "rejected_data", result.data
|
||||
if result.message:
|
||||
yield "review_message", result.message
|
||||
@@ -2,6 +2,8 @@ import copy
|
||||
from datetime import date, time
|
||||
from typing import Any, Optional
|
||||
|
||||
# Import for Google Drive file input block
|
||||
from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
BlockCategory,
|
||||
@@ -646,6 +648,119 @@ class AgentTableInputBlock(AgentInputBlock):
|
||||
yield "result", input_data.value if input_data.value is not None else []
|
||||
|
||||
|
||||
class AgentGoogleDriveFileInputBlock(AgentInputBlock):
|
||||
"""
|
||||
This block allows users to select a file from Google Drive.
|
||||
|
||||
It provides a Google Drive file picker UI that handles both authentication
|
||||
and file selection. The selected file information (ID, name, URL, etc.)
|
||||
is output for use by other blocks like Google Sheets Read.
|
||||
"""
|
||||
|
||||
class Input(AgentInputBlock.Input):
|
||||
value: Optional[GoogleDriveFile] = SchemaField(
|
||||
description="The selected Google Drive file.",
|
||||
default=None,
|
||||
advanced=False,
|
||||
title="Selected File",
|
||||
)
|
||||
allowed_views: list[AttachmentView] = SchemaField(
|
||||
description="Which views to show in the file picker (DOCS, SPREADSHEETS, PRESENTATIONS, etc.).",
|
||||
default_factory=lambda: ["DOCS", "SPREADSHEETS", "PRESENTATIONS"],
|
||||
advanced=False,
|
||||
title="Allowed Views",
|
||||
)
|
||||
allow_folder_selection: bool = SchemaField(
|
||||
description="Whether to allow selecting folders.",
|
||||
default=False,
|
||||
advanced=True,
|
||||
title="Allow Folder Selection",
|
||||
)
|
||||
|
||||
def generate_schema(self):
|
||||
"""Generate schema for the value field with Google Drive picker format."""
|
||||
schema = super().generate_schema()
|
||||
|
||||
# Default scopes for drive.file access
|
||||
scopes = ["https://www.googleapis.com/auth/drive.file"]
|
||||
|
||||
# Build picker configuration
|
||||
picker_config = {
|
||||
"multiselect": False, # Single file selection only for now
|
||||
"allow_folder_selection": self.allow_folder_selection,
|
||||
"allowed_views": (
|
||||
list(self.allowed_views) if self.allowed_views else ["DOCS"]
|
||||
),
|
||||
"scopes": scopes,
|
||||
# Auto-credentials config tells frontend to include _credentials_id in output
|
||||
"auto_credentials": {
|
||||
"provider": "google",
|
||||
"type": "oauth2",
|
||||
"scopes": scopes,
|
||||
"kwarg_name": "credentials",
|
||||
},
|
||||
}
|
||||
|
||||
# Set format and config for frontend to render Google Drive picker
|
||||
schema["format"] = "google-drive-picker"
|
||||
schema["google_drive_picker_config"] = picker_config
|
||||
# Also keep auto_credentials at top level for backend detection
|
||||
schema["auto_credentials"] = {
|
||||
"provider": "google",
|
||||
"type": "oauth2",
|
||||
"scopes": scopes,
|
||||
"kwarg_name": "credentials",
|
||||
}
|
||||
|
||||
if self.value is not None:
|
||||
schema["default"] = self.value.model_dump()
|
||||
|
||||
return schema
|
||||
|
||||
class Output(AgentInputBlock.Output):
|
||||
result: GoogleDriveFile = SchemaField(
|
||||
description="The selected Google Drive file with ID, name, URL, and other metadata."
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
test_file = GoogleDriveFile.model_validate(
|
||||
{
|
||||
"id": "test-file-id",
|
||||
"name": "Test Spreadsheet",
|
||||
"mimeType": "application/vnd.google-apps.spreadsheet",
|
||||
"url": "https://docs.google.com/spreadsheets/d/test-file-id",
|
||||
}
|
||||
)
|
||||
super().__init__(
|
||||
id="d3b32f15-6fd7-40e3-be52-e083f51b19a2",
|
||||
description="Block for selecting a file from Google Drive.",
|
||||
disabled=not config.enable_agent_input_subtype_blocks,
|
||||
input_schema=AgentGoogleDriveFileInputBlock.Input,
|
||||
output_schema=AgentGoogleDriveFileInputBlock.Output,
|
||||
test_input=[
|
||||
{
|
||||
"name": "spreadsheet_input",
|
||||
"description": "Select a spreadsheet from Google Drive",
|
||||
"allowed_views": ["SPREADSHEETS"],
|
||||
"value": {
|
||||
"id": "test-file-id",
|
||||
"name": "Test Spreadsheet",
|
||||
"mimeType": "application/vnd.google-apps.spreadsheet",
|
||||
"url": "https://docs.google.com/spreadsheets/d/test-file-id",
|
||||
},
|
||||
}
|
||||
],
|
||||
test_output=[("result", test_file)],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
|
||||
"""
|
||||
Yields the selected Google Drive file.
|
||||
"""
|
||||
if input_data.value is not None:
|
||||
yield "result", input_data.value
|
||||
|
||||
|
||||
IO_BLOCK_IDs = [
|
||||
AgentInputBlock().id,
|
||||
AgentOutputBlock().id,
|
||||
@@ -658,4 +773,5 @@ IO_BLOCK_IDs = [
|
||||
AgentDropdownInputBlock().id,
|
||||
AgentToggleInputBlock().id,
|
||||
AgentTableInputBlock().id,
|
||||
AgentGoogleDriveFileInputBlock().id,
|
||||
]
|
||||
|
||||
@@ -265,3 +265,68 @@ class LinearClient:
|
||||
return [Issue(**issue) for issue in issues["searchIssues"]["nodes"]]
|
||||
except LinearAPIException as e:
|
||||
raise e
|
||||
|
||||
async def try_get_issues(
|
||||
self, project: str, status: str, is_assigned: bool, include_comments: bool
|
||||
) -> list[Issue]:
|
||||
try:
|
||||
query = """
|
||||
query IssuesByProjectStatusAndAssignee(
|
||||
$projectName: String!
|
||||
$statusName: String!
|
||||
$isAssigned: Boolean!
|
||||
$includeComments: Boolean! = false
|
||||
) {
|
||||
issues(
|
||||
filter: {
|
||||
project: { name: { eq: $projectName } }
|
||||
state: { name: { eq: $statusName } }
|
||||
assignee: { null: $isAssigned }
|
||||
}
|
||||
) {
|
||||
nodes {
|
||||
id
|
||||
title
|
||||
identifier
|
||||
description
|
||||
createdAt
|
||||
priority
|
||||
assignee {
|
||||
id
|
||||
name
|
||||
}
|
||||
project {
|
||||
id
|
||||
name
|
||||
}
|
||||
state {
|
||||
id
|
||||
name
|
||||
}
|
||||
comments @include(if: $includeComments) {
|
||||
nodes {
|
||||
id
|
||||
body
|
||||
createdAt
|
||||
user {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
variables: dict[str, Any] = {
|
||||
"projectName": project,
|
||||
"statusName": status,
|
||||
"isAssigned": not is_assigned,
|
||||
"includeComments": include_comments,
|
||||
}
|
||||
|
||||
issues = await self.query(query, variables)
|
||||
return [Issue(**issue) for issue in issues["issues"]["nodes"]]
|
||||
except LinearAPIException as e:
|
||||
raise e
|
||||
|
||||
@@ -203,3 +203,106 @@ class LinearSearchIssuesBlock(Block):
|
||||
yield "error", str(e)
|
||||
except Exception as e:
|
||||
yield "error", f"Unexpected error: {str(e)}"
|
||||
|
||||
|
||||
class LinearGetProjectIssuesBlock(Block):
|
||||
"""Block for getting issues from a Linear project filtered by status and assignee"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = linear.credentials_field(
|
||||
description="Linear credentials with read permissions",
|
||||
required_scopes={LinearScope.READ},
|
||||
)
|
||||
project: str = SchemaField(description="Name of the project to get issues from")
|
||||
status: str = SchemaField(
|
||||
description="Status/state name to filter issues by (e.g., 'In Progress', 'Done')"
|
||||
)
|
||||
is_assigned: bool = SchemaField(
|
||||
description="Filter by assignee status - True to get assigned issues, False to get unassigned issues",
|
||||
default=False,
|
||||
)
|
||||
include_comments: bool = SchemaField(
|
||||
description="Whether to include comments in the response",
|
||||
default=False,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
issues: list[Issue] = SchemaField(
|
||||
description="List of issues matching the criteria"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c7d3f1e8-45a9-4b2c-9f81-3e6a8d7c5b1a",
|
||||
description="Gets issues from a Linear project filtered by status and assignee",
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
|
||||
test_input={
|
||||
"project": "Test Project",
|
||||
"status": "In Progress",
|
||||
"is_assigned": False,
|
||||
"include_comments": False,
|
||||
"credentials": TEST_CREDENTIALS_INPUT_OAUTH,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS_OAUTH,
|
||||
test_output=[
|
||||
(
|
||||
"issues",
|
||||
[
|
||||
Issue(
|
||||
id="abc123",
|
||||
identifier="TST-123",
|
||||
title="Test issue",
|
||||
description="Test description",
|
||||
priority=1,
|
||||
)
|
||||
],
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"get_project_issues": lambda *args, **kwargs: [
|
||||
Issue(
|
||||
id="abc123",
|
||||
identifier="TST-123",
|
||||
title="Test issue",
|
||||
description="Test description",
|
||||
priority=1,
|
||||
)
|
||||
]
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
async def get_project_issues(
|
||||
credentials: OAuth2Credentials | APIKeyCredentials,
|
||||
project: str,
|
||||
status: str,
|
||||
is_assigned: bool,
|
||||
include_comments: bool,
|
||||
) -> list[Issue]:
|
||||
client = LinearClient(credentials=credentials)
|
||||
response: list[Issue] = await client.try_get_issues(
|
||||
project=project,
|
||||
status=status,
|
||||
is_assigned=is_assigned,
|
||||
include_comments=include_comments,
|
||||
)
|
||||
return response
|
||||
|
||||
async def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: OAuth2Credentials | APIKeyCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
"""Execute getting project issues"""
|
||||
issues = await self.get_project_issues(
|
||||
credentials=credentials,
|
||||
project=input_data.project,
|
||||
status=input_data.status,
|
||||
is_assigned=input_data.is_assigned,
|
||||
include_comments=input_data.include_comments,
|
||||
)
|
||||
yield "issues", issues
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
from backend.sdk import BaseModel
|
||||
|
||||
|
||||
class User(BaseModel):
|
||||
id: str
|
||||
name: str
|
||||
|
||||
|
||||
class Comment(BaseModel):
|
||||
id: str
|
||||
body: str
|
||||
createdAt: str | None = None
|
||||
user: User | None = None
|
||||
|
||||
|
||||
class CreateCommentInput(BaseModel):
|
||||
@@ -20,22 +27,26 @@ class CreateCommentResponseWrapper(BaseModel):
|
||||
commentCreate: CreateCommentResponse
|
||||
|
||||
|
||||
class Project(BaseModel):
|
||||
id: str
|
||||
name: str
|
||||
description: str | None = None
|
||||
priority: int | None = None
|
||||
progress: float | None = None
|
||||
content: str | None = None
|
||||
|
||||
|
||||
class Issue(BaseModel):
|
||||
id: str
|
||||
identifier: str
|
||||
title: str
|
||||
description: str | None
|
||||
priority: int
|
||||
project: Project | None = None
|
||||
createdAt: str | None = None
|
||||
comments: list[Comment] | None = None
|
||||
assignee: User | None = None
|
||||
|
||||
|
||||
class CreateIssueResponse(BaseModel):
|
||||
issue: Issue
|
||||
|
||||
|
||||
class Project(BaseModel):
|
||||
id: str
|
||||
name: str
|
||||
description: str
|
||||
priority: int
|
||||
progress: float
|
||||
content: str | None
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# This file contains a lot of prompt block strings that would trigger "line too long"
|
||||
# flake8: noqa: E501
|
||||
import ast
|
||||
import logging
|
||||
import re
|
||||
import secrets
|
||||
@@ -94,6 +93,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
O1_MINI = "o1-mini"
|
||||
# GPT-5 models
|
||||
GPT5 = "gpt-5-2025-08-07"
|
||||
GPT5_1 = "gpt-5.1-2025-11-13"
|
||||
GPT5_MINI = "gpt-5-mini-2025-08-07"
|
||||
GPT5_NANO = "gpt-5-nano-2025-08-07"
|
||||
GPT5_CHAT = "gpt-5-chat-latest"
|
||||
@@ -107,6 +107,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
|
||||
CLAUDE_4_OPUS = "claude-opus-4-20250514"
|
||||
CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
|
||||
CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
|
||||
CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
|
||||
CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
|
||||
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
|
||||
@@ -130,6 +131,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
OPENAI_GPT_OSS_120B = "openai/gpt-oss-120b"
|
||||
OPENAI_GPT_OSS_20B = "openai/gpt-oss-20b"
|
||||
GEMINI_2_5_PRO = "google/gemini-2.5-pro-preview-03-25"
|
||||
GEMINI_3_PRO_PREVIEW = "google/gemini-3-pro-preview"
|
||||
GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
|
||||
GEMINI_2_0_FLASH = "google/gemini-2.0-flash-001"
|
||||
GEMINI_2_5_FLASH_LITE_PREVIEW = "google/gemini-2.5-flash-lite-preview-06-17"
|
||||
@@ -152,6 +154,9 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
|
||||
META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
|
||||
GROK_4 = "x-ai/grok-4"
|
||||
GROK_4_FAST = "x-ai/grok-4-fast"
|
||||
GROK_4_1_FAST = "x-ai/grok-4.1-fast"
|
||||
GROK_CODE_FAST_1 = "x-ai/grok-code-fast-1"
|
||||
KIMI_K2 = "moonshotai/kimi-k2"
|
||||
QWEN3_235B_A22B_THINKING = "qwen/qwen3-235b-a22b-thinking-2507"
|
||||
QWEN3_CODER = "qwen/qwen3-coder"
|
||||
@@ -190,6 +195,7 @@ MODEL_METADATA = {
|
||||
LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12
|
||||
# GPT-5 models
|
||||
LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
|
||||
@@ -213,6 +219,9 @@ MODEL_METADATA = {
|
||||
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000
|
||||
), # claude-4-sonnet-20250514
|
||||
LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
|
||||
"anthropic", 200000, 64000
|
||||
), # claude-opus-4-5-20251101
|
||||
LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000
|
||||
), # claude-sonnet-4-5-20250929
|
||||
@@ -242,6 +251,7 @@ MODEL_METADATA = {
|
||||
LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None),
|
||||
# https://openrouter.ai/models
|
||||
LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192),
|
||||
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535),
|
||||
LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535),
|
||||
LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192),
|
||||
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
|
||||
@@ -253,12 +263,12 @@ MODEL_METADATA = {
|
||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096),
|
||||
LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048),
|
||||
LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840),
|
||||
LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 127000),
|
||||
LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000),
|
||||
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
|
||||
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
|
||||
"open_router",
|
||||
128000,
|
||||
128000,
|
||||
16000,
|
||||
),
|
||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
|
||||
"open_router", 131000, 4096
|
||||
@@ -276,6 +286,9 @@ MODEL_METADATA = {
|
||||
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
|
||||
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
|
||||
LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000),
|
||||
LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000),
|
||||
LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000),
|
||||
LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000),
|
||||
LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000),
|
||||
LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144),
|
||||
LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144),
|
||||
@@ -798,7 +811,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
default="",
|
||||
description="The system prompt to provide additional context to the model.",
|
||||
)
|
||||
conversation_history: list[dict] = SchemaField(
|
||||
conversation_history: list[dict] | None = SchemaField(
|
||||
default_factory=list,
|
||||
description="The conversation history to provide context for the prompt.",
|
||||
)
|
||||
@@ -905,7 +918,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
logger.debug(f"Calling LLM with input data: {input_data}")
|
||||
prompt = [json.to_dict(p) for p in input_data.conversation_history]
|
||||
prompt = [json.to_dict(p) for p in input_data.conversation_history or [] if p]
|
||||
|
||||
values = input_data.prompt_values
|
||||
if values:
|
||||
@@ -1633,6 +1646,17 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
ge=1,
|
||||
le=5,
|
||||
)
|
||||
force_json_output: bool = SchemaField(
|
||||
title="Restrict LLM to pure JSON output",
|
||||
default=False,
|
||||
description=(
|
||||
"Whether to force the LLM to produce a JSON-only response. "
|
||||
"This can increase the block's reliability, "
|
||||
"but may also reduce the quality of the response "
|
||||
"because it prohibits the LLM from reasoning "
|
||||
"before providing its JSON response."
|
||||
),
|
||||
)
|
||||
max_tokens: int | None = SchemaField(
|
||||
advanced=True,
|
||||
default=None,
|
||||
@@ -1645,7 +1669,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
generated_list: List[str] = SchemaField(description="The generated list.")
|
||||
generated_list: list[str] = SchemaField(description="The generated list.")
|
||||
list_item: str = SchemaField(
|
||||
description="Each individual item in the list.",
|
||||
)
|
||||
@@ -1654,7 +1678,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="9c0b0450-d199-458b-a731-072189dd6593",
|
||||
description="Generate a Python list based on the given prompt using a Large Language Model (LLM).",
|
||||
description="Generate a list of values based on the given prompt using a Large Language Model (LLM).",
|
||||
categories={BlockCategory.AI, BlockCategory.TEXT},
|
||||
input_schema=AIListGeneratorBlock.Input,
|
||||
output_schema=AIListGeneratorBlock.Output,
|
||||
@@ -1671,6 +1695,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
"model": LlmModel.GPT4O,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"max_retries": 3,
|
||||
"force_json_output": False,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
@@ -1687,7 +1712,13 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
],
|
||||
test_mock={
|
||||
"llm_call": lambda input_data, credentials: {
|
||||
"response": "['Zylora Prime', 'Kharon-9', 'Vortexia', 'Oceara', 'Draknos']"
|
||||
"list": [
|
||||
"Zylora Prime",
|
||||
"Kharon-9",
|
||||
"Vortexia",
|
||||
"Oceara",
|
||||
"Draknos",
|
||||
]
|
||||
},
|
||||
},
|
||||
)
|
||||
@@ -1696,7 +1727,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
self,
|
||||
input_data: AIStructuredResponseGeneratorBlock.Input,
|
||||
credentials: APIKeyCredentials,
|
||||
) -> dict[str, str]:
|
||||
) -> dict[str, Any]:
|
||||
llm_block = AIStructuredResponseGeneratorBlock()
|
||||
response = await llm_block.run_once(
|
||||
input_data, "response", credentials=credentials
|
||||
@@ -1704,72 +1735,23 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
self.merge_llm_stats(llm_block)
|
||||
return response
|
||||
|
||||
@staticmethod
|
||||
def string_to_list(string):
|
||||
"""
|
||||
Converts a string representation of a list into an actual Python list object.
|
||||
"""
|
||||
logger.debug(f"Converting string to list. Input string: {string}")
|
||||
try:
|
||||
# Use ast.literal_eval to safely evaluate the string
|
||||
python_list = ast.literal_eval(string)
|
||||
if isinstance(python_list, list):
|
||||
logger.debug(f"Successfully converted string to list: {python_list}")
|
||||
return python_list
|
||||
else:
|
||||
logger.error(f"The provided string '{string}' is not a valid list")
|
||||
raise ValueError(f"The provided string '{string}' is not a valid list.")
|
||||
except (SyntaxError, ValueError) as e:
|
||||
logger.error(f"Failed to convert string to list: {e}")
|
||||
raise ValueError("Invalid list format. Could not convert to list.")
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
logger.debug(f"Starting AIListGeneratorBlock.run with input data: {input_data}")
|
||||
|
||||
# Check for API key
|
||||
api_key_check = credentials.api_key.get_secret_value()
|
||||
if not api_key_check:
|
||||
raise ValueError("No LLM API key provided.")
|
||||
# Create a proper expected format for the structured response generator
|
||||
expected_format = {
|
||||
"list": "A JSON array containing the generated string values"
|
||||
}
|
||||
if input_data.force_json_output:
|
||||
# Add reasoning field for better performance
|
||||
expected_format = {
|
||||
"reasoning": "... (optional)",
|
||||
**expected_format,
|
||||
}
|
||||
|
||||
# Prepare the system prompt
|
||||
sys_prompt = """You are a Python list generator. Your task is to generate a Python list based on the user's prompt.
|
||||
|Respond ONLY with a valid python list.
|
||||
|The list can contain strings, numbers, or nested lists as appropriate.
|
||||
|Do not include any explanations or additional text.
|
||||
|
||||
|Valid Example string formats:
|
||||
|
||||
|Example 1:
|
||||
|```
|
||||
|['1', '2', '3', '4']
|
||||
|```
|
||||
|
||||
|Example 2:
|
||||
|```
|
||||
|[['1', '2'], ['3', '4'], ['5', '6']]
|
||||
|```
|
||||
|
||||
|Example 3:
|
||||
|```
|
||||
|['1', ['2', '3'], ['4', ['5', '6']]]
|
||||
|```
|
||||
|
||||
|Example 4:
|
||||
|```
|
||||
|['a', 'b', 'c']
|
||||
|```
|
||||
|
||||
|Example 5:
|
||||
|```
|
||||
|['1', '2.5', 'string', 'True', ['False', 'None']]
|
||||
|```
|
||||
|
||||
|Do not include any explanations or additional text, just respond with the list in the format specified above.
|
||||
|Do not include code fences or any other formatting, just the raw list.
|
||||
"""
|
||||
# If a focus is provided, add it to the prompt
|
||||
# Build the prompt
|
||||
if input_data.focus:
|
||||
prompt = f"Generate a list with the following focus:\n<focus>\n\n{input_data.focus}</focus>"
|
||||
else:
|
||||
@@ -1777,7 +1759,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
if input_data.source_data:
|
||||
prompt = "Extract the main focus of the source data to a list.\ni.e if the source data is a news website, the focus would be the news stories rather than the social links in the footer."
|
||||
else:
|
||||
# No focus or source data provided, generat a random list
|
||||
# No focus or source data provided, generate a random list
|
||||
prompt = "Generate a random list."
|
||||
|
||||
# If the source data is provided, add it to the prompt
|
||||
@@ -1787,63 +1769,56 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
else:
|
||||
prompt += "\n\nInvent the data to generate the list from."
|
||||
|
||||
for attempt in range(input_data.max_retries):
|
||||
try:
|
||||
logger.debug("Calling LLM")
|
||||
llm_response = await self.llm_call(
|
||||
AIStructuredResponseGeneratorBlock.Input(
|
||||
sys_prompt=sys_prompt,
|
||||
prompt=prompt,
|
||||
credentials=input_data.credentials,
|
||||
model=input_data.model,
|
||||
expected_format={}, # Do not use structured response
|
||||
ollama_host=input_data.ollama_host,
|
||||
),
|
||||
credentials=credentials,
|
||||
)
|
||||
# Use the structured response generator to handle all the complexity
|
||||
response_obj = await self.llm_call(
|
||||
AIStructuredResponseGeneratorBlock.Input(
|
||||
sys_prompt=self.SYSTEM_PROMPT,
|
||||
prompt=prompt,
|
||||
credentials=input_data.credentials,
|
||||
model=input_data.model,
|
||||
expected_format=expected_format,
|
||||
force_json_output=input_data.force_json_output,
|
||||
retry=input_data.max_retries,
|
||||
max_tokens=input_data.max_tokens,
|
||||
ollama_host=input_data.ollama_host,
|
||||
),
|
||||
credentials=credentials,
|
||||
)
|
||||
logger.debug(f"Response object: {response_obj}")
|
||||
|
||||
logger.debug(f"LLM response: {llm_response}")
|
||||
# Extract the list from the response object
|
||||
if isinstance(response_obj, dict) and "list" in response_obj:
|
||||
parsed_list = response_obj["list"]
|
||||
else:
|
||||
# Fallback - treat the whole response as the list
|
||||
parsed_list = response_obj
|
||||
|
||||
# Extract Response string
|
||||
response_string = llm_response["response"]
|
||||
logger.debug(f"Response string: {response_string}")
|
||||
# Validate that we got a list
|
||||
if not isinstance(parsed_list, list):
|
||||
raise ValueError(
|
||||
f"Expected a list, but got {type(parsed_list).__name__}: {parsed_list}"
|
||||
)
|
||||
|
||||
# Convert the string to a Python list
|
||||
logger.debug("Converting string to Python list")
|
||||
parsed_list = self.string_to_list(response_string)
|
||||
logger.debug(f"Parsed list: {parsed_list}")
|
||||
logger.debug(f"Parsed list: {parsed_list}")
|
||||
|
||||
# If we reach here, we have a valid Python list
|
||||
logger.debug("Successfully generated a valid Python list")
|
||||
yield "generated_list", parsed_list
|
||||
yield "prompt", self.prompt
|
||||
# Yield the results
|
||||
yield "generated_list", parsed_list
|
||||
yield "prompt", self.prompt
|
||||
|
||||
# Yield each item in the list
|
||||
for item in parsed_list:
|
||||
yield "list_item", item
|
||||
return
|
||||
# Yield each item in the list
|
||||
for item in parsed_list:
|
||||
yield "list_item", item
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in attempt {attempt + 1}: {str(e)}")
|
||||
if attempt == input_data.max_retries - 1:
|
||||
logger.error(
|
||||
f"Failed to generate a valid Python list after {input_data.max_retries} attempts"
|
||||
)
|
||||
raise RuntimeError(
|
||||
f"Failed to generate a valid Python list after {input_data.max_retries} attempts. Last error: {str(e)}"
|
||||
)
|
||||
else:
|
||||
# Add a retry prompt
|
||||
logger.debug("Preparing retry prompt")
|
||||
prompt = f"""
|
||||
The previous attempt failed due to `{e}`
|
||||
Generate a valid Python list based on the original prompt.
|
||||
Remember to respond ONLY with a valid Python list as per the format specified earlier.
|
||||
Original prompt:
|
||||
```{prompt}```
|
||||
|
||||
Respond only with the list in the format specified with no commentary or apologies.
|
||||
"""
|
||||
logger.debug(f"Retry prompt: {prompt}")
|
||||
|
||||
logger.debug("AIListGeneratorBlock.run completed")
|
||||
SYSTEM_PROMPT = trim_prompt(
|
||||
"""
|
||||
|You are a JSON array generator. Your task is to generate a JSON array of string values based on the user's prompt.
|
||||
|
|
||||
|The 'list' field should contain a JSON array with the generated string values.
|
||||
|The array can contain ONLY strings.
|
||||
|
|
||||
|Valid JSON array formats include:
|
||||
|• ["string1", "string2", "string3"]
|
||||
|
|
||||
|Ensure you provide a proper JSON array with only string values in the 'list' field.
|
||||
"""
|
||||
)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Iterator, Literal
|
||||
|
||||
@@ -64,6 +65,7 @@ class RedditComment(BaseModel):
|
||||
|
||||
|
||||
settings = Settings()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_praw(creds: RedditCredentials) -> praw.Reddit:
|
||||
@@ -77,7 +79,7 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit:
|
||||
me = client.user.me()
|
||||
if not me:
|
||||
raise ValueError("Invalid Reddit credentials.")
|
||||
print(f"Logged in as Reddit user: {me.name}")
|
||||
logger.info(f"Logged in as Reddit user: {me.name}")
|
||||
return client
|
||||
|
||||
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
import logging
|
||||
import re
|
||||
from collections import Counter
|
||||
from concurrent.futures import Future
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
import backend.blocks.llm as llm
|
||||
from backend.blocks.agent import AgentExecutorBlock
|
||||
from backend.data.block import (
|
||||
@@ -18,17 +21,43 @@ from backend.data.dynamic_fields import (
|
||||
extract_base_field_name,
|
||||
get_dynamic_field_description,
|
||||
is_dynamic_field,
|
||||
is_tool_pin,
|
||||
)
|
||||
from backend.data.execution import ExecutionContext
|
||||
from backend.data.model import NodeExecutionStats, SchemaField
|
||||
from backend.util import json
|
||||
from backend.util.clients import get_database_manager_async_client
|
||||
from backend.util.prompt import MAIN_OBJECTIVE_PREFIX
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.data.graph import Link, Node
|
||||
from backend.executor.manager import ExecutionProcessor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolInfo(BaseModel):
|
||||
"""Processed tool call information."""
|
||||
|
||||
tool_call: Any # The original tool call object from LLM response
|
||||
tool_name: str # The function name
|
||||
tool_def: dict[str, Any] # The tool definition from tool_functions
|
||||
input_data: dict[str, Any] # Processed input data ready for tool execution
|
||||
field_mapping: dict[str, str] # Field name mapping for the tool
|
||||
|
||||
|
||||
class ExecutionParams(BaseModel):
|
||||
"""Tool execution parameters."""
|
||||
|
||||
user_id: str
|
||||
graph_id: str
|
||||
node_id: str
|
||||
graph_version: int
|
||||
graph_exec_id: str
|
||||
node_exec_id: str
|
||||
execution_context: "ExecutionContext"
|
||||
|
||||
|
||||
def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
|
||||
"""
|
||||
Return a list of tool_call_ids if the entry is a tool request.
|
||||
@@ -104,6 +133,50 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]:
|
||||
return {"role": "tool", "tool_call_id": call_id, "content": content}
|
||||
|
||||
|
||||
def _combine_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Combine multiple Anthropic tool responses into a single user message.
|
||||
For non-Anthropic formats, returns the original list unchanged.
|
||||
"""
|
||||
if len(tool_outputs) <= 1:
|
||||
return tool_outputs
|
||||
|
||||
# Anthropic responses have role="user", type="message", and content is a list with tool_result items
|
||||
anthropic_responses = [
|
||||
output
|
||||
for output in tool_outputs
|
||||
if (
|
||||
output.get("role") == "user"
|
||||
and output.get("type") == "message"
|
||||
and isinstance(output.get("content"), list)
|
||||
and any(
|
||||
item.get("type") == "tool_result"
|
||||
for item in output.get("content", [])
|
||||
if isinstance(item, dict)
|
||||
)
|
||||
)
|
||||
]
|
||||
|
||||
if len(anthropic_responses) > 1:
|
||||
combined_content = [
|
||||
item for response in anthropic_responses for item in response["content"]
|
||||
]
|
||||
|
||||
combined_response = {
|
||||
"role": "user",
|
||||
"type": "message",
|
||||
"content": combined_content,
|
||||
}
|
||||
|
||||
non_anthropic_responses = [
|
||||
output for output in tool_outputs if output not in anthropic_responses
|
||||
]
|
||||
|
||||
return [combined_response] + non_anthropic_responses
|
||||
|
||||
return tool_outputs
|
||||
|
||||
|
||||
def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
|
||||
"""
|
||||
Safely convert raw_response to dictionary format for conversation history.
|
||||
@@ -120,13 +193,16 @@ def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
|
||||
return json.to_dict(raw_response)
|
||||
|
||||
|
||||
def get_pending_tool_calls(conversation_history: list[Any]) -> dict[str, int]:
|
||||
def get_pending_tool_calls(conversation_history: list[Any] | None) -> dict[str, int]:
|
||||
"""
|
||||
All the tool calls entry in the conversation history requires a response.
|
||||
This function returns the pending tool calls that has not generated an output yet.
|
||||
|
||||
Return: dict[str, int] - A dictionary of pending tool call IDs with their count.
|
||||
"""
|
||||
if not conversation_history:
|
||||
return {}
|
||||
|
||||
pending_calls = Counter()
|
||||
for history in conversation_history:
|
||||
for call_id in _get_tool_requests(history):
|
||||
@@ -172,7 +248,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
"Function parameters that has no default value and not optional typed has to be provided. ",
|
||||
description="The system prompt to provide additional context to the model.",
|
||||
)
|
||||
conversation_history: list[dict] = SchemaField(
|
||||
conversation_history: list[dict] | None = SchemaField(
|
||||
default_factory=list,
|
||||
description="The conversation history to provide context for the prompt.",
|
||||
)
|
||||
@@ -200,6 +276,17 @@ class SmartDecisionMakerBlock(Block):
|
||||
default="localhost:11434",
|
||||
description="Ollama host for local models",
|
||||
)
|
||||
agent_mode_max_iterations: int = SchemaField(
|
||||
title="Agent Mode Max Iterations",
|
||||
description="Maximum iterations for agent mode. 0 = traditional mode (single LLM call, yield tool calls for external execution), -1 = infinite agent mode (loop until finished), 1+ = agent mode with max iterations limit.",
|
||||
advanced=True,
|
||||
default=0,
|
||||
)
|
||||
conversation_compaction: bool = SchemaField(
|
||||
default=True,
|
||||
title="Context window auto-compaction",
|
||||
description="Automatically compact the context window once it hits the limit",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
|
||||
@@ -367,8 +454,9 @@ class SmartDecisionMakerBlock(Block):
|
||||
"required": sorted(required_fields),
|
||||
}
|
||||
|
||||
# Store field mapping for later use in output processing
|
||||
# Store field mapping and node info for later use in output processing
|
||||
tool_function["_field_mapping"] = field_mapping
|
||||
tool_function["_sink_node_id"] = sink_node.id
|
||||
|
||||
return {"type": "function", "function": tool_function}
|
||||
|
||||
@@ -431,10 +519,13 @@ class SmartDecisionMakerBlock(Block):
|
||||
"strict": True,
|
||||
}
|
||||
|
||||
# Store node info for later use in output processing
|
||||
tool_function["_sink_node_id"] = sink_node.id
|
||||
|
||||
return {"type": "function", "function": tool_function}
|
||||
|
||||
@staticmethod
|
||||
async def _create_function_signature(
|
||||
async def _create_tool_node_signatures(
|
||||
node_id: str,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""
|
||||
@@ -450,7 +541,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
tools = [
|
||||
(link, node)
|
||||
for link, node in await db_client.get_connected_output_nodes(node_id)
|
||||
if link.source_name.startswith("tools_^_") and link.source_id == node_id
|
||||
if is_tool_pin(link.source_name) and link.source_id == node_id
|
||||
]
|
||||
if not tools:
|
||||
raise ValueError("There is no next node to execute.")
|
||||
@@ -498,6 +589,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
Returns the response if successful, raises ValueError if validation fails.
|
||||
"""
|
||||
resp = await llm.llm_call(
|
||||
compress_prompt_to_fit=input_data.conversation_compaction,
|
||||
credentials=credentials,
|
||||
llm_model=input_data.model,
|
||||
prompt=current_prompt,
|
||||
@@ -538,8 +630,14 @@ class SmartDecisionMakerBlock(Block):
|
||||
),
|
||||
None,
|
||||
)
|
||||
if tool_def is None and len(tool_functions) == 1:
|
||||
tool_def = tool_functions[0]
|
||||
if tool_def is None:
|
||||
if len(tool_functions) == 1:
|
||||
tool_def = tool_functions[0]
|
||||
else:
|
||||
validation_errors_list.append(
|
||||
f"Tool call for '{tool_name}' does not match any known "
|
||||
"tool definition."
|
||||
)
|
||||
|
||||
# Get parameters schema from tool definition
|
||||
if (
|
||||
@@ -579,6 +677,291 @@ class SmartDecisionMakerBlock(Block):
|
||||
|
||||
return resp
|
||||
|
||||
def _process_tool_calls(
|
||||
self, response, tool_functions: list[dict[str, Any]]
|
||||
) -> list[ToolInfo]:
|
||||
"""Process tool calls and extract tool definitions, arguments, and input data.
|
||||
|
||||
Returns a list of tool info dicts with:
|
||||
- tool_call: The original tool call object
|
||||
- tool_name: The function name
|
||||
- tool_def: The tool definition from tool_functions
|
||||
- input_data: Processed input data dict (includes None values)
|
||||
- field_mapping: Field name mapping for the tool
|
||||
"""
|
||||
if not response.tool_calls:
|
||||
return []
|
||||
|
||||
processed_tools = []
|
||||
for tool_call in response.tool_calls:
|
||||
tool_name = tool_call.function.name
|
||||
tool_args = json.loads(tool_call.function.arguments)
|
||||
|
||||
tool_def = next(
|
||||
(
|
||||
tool
|
||||
for tool in tool_functions
|
||||
if tool["function"]["name"] == tool_name
|
||||
),
|
||||
None,
|
||||
)
|
||||
if not tool_def:
|
||||
if len(tool_functions) == 1:
|
||||
tool_def = tool_functions[0]
|
||||
else:
|
||||
continue
|
||||
|
||||
# Build input data for the tool
|
||||
input_data = {}
|
||||
field_mapping = tool_def["function"].get("_field_mapping", {})
|
||||
if "function" in tool_def and "parameters" in tool_def["function"]:
|
||||
expected_args = tool_def["function"]["parameters"].get("properties", {})
|
||||
for clean_arg_name in expected_args:
|
||||
original_field_name = field_mapping.get(
|
||||
clean_arg_name, clean_arg_name
|
||||
)
|
||||
arg_value = tool_args.get(clean_arg_name)
|
||||
# Include all expected parameters, even if None (for backward compatibility with tests)
|
||||
input_data[original_field_name] = arg_value
|
||||
|
||||
processed_tools.append(
|
||||
ToolInfo(
|
||||
tool_call=tool_call,
|
||||
tool_name=tool_name,
|
||||
tool_def=tool_def,
|
||||
input_data=input_data,
|
||||
field_mapping=field_mapping,
|
||||
)
|
||||
)
|
||||
|
||||
return processed_tools
|
||||
|
||||
def _update_conversation(
|
||||
self, prompt: list[dict], response, tool_outputs: list | None = None
|
||||
):
|
||||
"""Update conversation history with response and tool outputs."""
|
||||
# Don't add separate reasoning message with tool calls (breaks Anthropic's tool_use->tool_result pairing)
|
||||
assistant_message = _convert_raw_response_to_dict(response.raw_response)
|
||||
has_tool_calls = isinstance(assistant_message.get("content"), list) and any(
|
||||
item.get("type") == "tool_use"
|
||||
for item in assistant_message.get("content", [])
|
||||
)
|
||||
|
||||
if response.reasoning and not has_tool_calls:
|
||||
prompt.append(
|
||||
{"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
|
||||
)
|
||||
|
||||
prompt.append(assistant_message)
|
||||
|
||||
if tool_outputs:
|
||||
prompt.extend(tool_outputs)
|
||||
|
||||
    async def _execute_single_tool_with_manager(
        self,
        tool_info: ToolInfo,
        execution_params: ExecutionParams,
        execution_processor: "ExecutionProcessor",
    ) -> dict:
        """Execute a single tool using the execution manager for proper integration."""
        # Lazy imports to avoid circular dependencies
        from backend.data.execution import NodeExecutionEntry

        tool_call = tool_info.tool_call
        tool_def = tool_info.tool_def
        raw_input_data = tool_info.input_data

        # Get sink node and field mapping
        sink_node_id = tool_def["function"]["_sink_node_id"]

        # Use proper database operations for tool execution
        db_client = get_database_manager_async_client()

        # Get target node
        target_node = await db_client.get_node(sink_node_id)
        if not target_node:
            raise ValueError(f"Target node {sink_node_id} not found")

        # Create proper node execution using upsert_execution_input
        node_exec_result = None
        final_input_data = None

        # Add all inputs to the execution
        if not raw_input_data:
            raise ValueError(f"Tool call has no input data: {tool_call}")

        for input_name, input_value in raw_input_data.items():
            node_exec_result, final_input_data = await db_client.upsert_execution_input(
                node_id=sink_node_id,
                graph_exec_id=execution_params.graph_exec_id,
                input_name=input_name,
                input_data=input_value,
            )

        assert node_exec_result is not None, "node_exec_result should not be None"

        # Create NodeExecutionEntry for execution manager
        node_exec_entry = NodeExecutionEntry(
            user_id=execution_params.user_id,
            graph_exec_id=execution_params.graph_exec_id,
            graph_id=execution_params.graph_id,
            graph_version=execution_params.graph_version,
            node_exec_id=node_exec_result.node_exec_id,
            node_id=sink_node_id,
            block_id=target_node.block_id,
            inputs=final_input_data or {},
            execution_context=execution_params.execution_context,
        )

        # Use the execution manager to execute the tool node
        try:
            # Get NodeExecutionProgress from the execution manager's running nodes
            node_exec_progress = execution_processor.running_node_execution[
                sink_node_id
            ]

            # Use the execution manager's own graph stats
            graph_stats_pair = (
                execution_processor.execution_stats,
                execution_processor.execution_stats_lock,
            )

            # Create a completed future for the task tracking system
            node_exec_future = Future()
            node_exec_progress.add_task(
                node_exec_id=node_exec_result.node_exec_id,
                task=node_exec_future,
            )

            # Execute the node directly since we're in the SmartDecisionMaker context
            node_exec_future.set_result(
                await execution_processor.on_node_execution(
                    node_exec=node_exec_entry,
                    node_exec_progress=node_exec_progress,
                    nodes_input_masks=None,
                    graph_stats_pair=graph_stats_pair,
                )
            )

            # Get outputs from database after execution completes using database manager client
            node_outputs = await db_client.get_execution_outputs_by_node_exec_id(
                node_exec_result.node_exec_id
            )

            # Create tool response
            tool_response_content = (
                json.dumps(node_outputs)
                if node_outputs
                else "Tool executed successfully"
            )
            return _create_tool_response(tool_call.id, tool_response_content)

        except Exception as e:
            logger.error(f"Tool execution with manager failed: {e}")
            # Return error response
            return _create_tool_response(
                tool_call.id, f"Tool execution failed: {str(e)}"
            )

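The tool result handed back to the conversation is built from the node's outputs: JSON-serialized when present, with a generic success string as fallback. `_create_tool_response` itself is defined elsewhere in the module; the return shape below is a plausible stand-in, an assumption rather than the real helper:

import json

def make_tool_response(tool_call_id: str, content: str) -> dict:
    # Hypothetical shape; the real _create_tool_response lives outside this diff.
    return {"role": "tool", "tool_call_id": tool_call_id, "content": content}

node_outputs = {"result": {"status": "success", "data": "search completed"}}
content = json.dumps(node_outputs) if node_outputs else "Tool executed successfully"
assert make_tool_response("call_1", content)["tool_call_id"] == "call_1"
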
    async def _execute_tools_agent_mode(
        self,
        input_data,
        credentials,
        tool_functions: list[dict[str, Any]],
        prompt: list[dict],
        graph_exec_id: str,
        node_id: str,
        node_exec_id: str,
        user_id: str,
        graph_id: str,
        graph_version: int,
        execution_context: ExecutionContext,
        execution_processor: "ExecutionProcessor",
    ):
        """Execute tools in agent mode with a loop until finished."""
        max_iterations = input_data.agent_mode_max_iterations
        iteration = 0

        # Execution parameters for tool execution
        execution_params = ExecutionParams(
            user_id=user_id,
            graph_id=graph_id,
            node_id=node_id,
            graph_version=graph_version,
            graph_exec_id=graph_exec_id,
            node_exec_id=node_exec_id,
            execution_context=execution_context,
        )

        current_prompt = list(prompt)

        while max_iterations < 0 or iteration < max_iterations:
            iteration += 1
            logger.debug(f"Agent mode iteration {iteration}")

            # Prepare prompt for this iteration
            iteration_prompt = list(current_prompt)

            # On the last iteration, add a special system message to encourage completion
            if max_iterations > 0 and iteration == max_iterations:
                last_iteration_message = {
                    "role": "system",
                    "content": f"{MAIN_OBJECTIVE_PREFIX}This is your last iteration ({iteration}/{max_iterations}). "
                    "Try to complete the task with the information you have. If you cannot fully complete it, "
                    "provide a summary of what you've accomplished and what remains to be done. "
                    "Prefer finishing with a clear response rather than making additional tool calls.",
                }
                iteration_prompt.append(last_iteration_message)

            # Get LLM response
            try:
                response = await self._attempt_llm_call_with_validation(
                    credentials, input_data, iteration_prompt, tool_functions
                )
            except Exception as e:
                yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}"
                return

            # Process tool calls
            processed_tools = self._process_tool_calls(response, tool_functions)

            # If no tool calls, we're done
            if not processed_tools:
                yield "finished", response.response
                self._update_conversation(current_prompt, response)
                yield "conversations", current_prompt
                return

            # Execute tools and collect responses
            tool_outputs = []
            for tool_info in processed_tools:
                try:
                    tool_response = await self._execute_single_tool_with_manager(
                        tool_info, execution_params, execution_processor
                    )
                    tool_outputs.append(tool_response)
                except Exception as e:
                    logger.error(f"Tool execution failed: {e}")
                    # Create error response for the tool
                    error_response = _create_tool_response(
                        tool_info.tool_call.id, f"Error: {str(e)}"
                    )
                    tool_outputs.append(error_response)

            tool_outputs = _combine_tool_responses(tool_outputs)

            self._update_conversation(current_prompt, response, tool_outputs)

            # Yield intermediate conversation state
            yield "conversations", current_prompt

        # If we reach max iterations, yield the current state
        if max_iterations < 0:
            yield "finished", f"Agent mode completed after {iteration} iterations"
        else:
            yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)"
        yield "conversations", current_prompt

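The loop's termination rule is worth spelling out: a negative `agent_mode_max_iterations` means loop until the model stops calling tools, a positive value caps the number of LLM turns (with a completion nudge on the final turn), and zero disables agent mode entirely (handled by the caller in `run`). A toy synchronous reduction of that rule, illustrative only and not the block's API:

def agent_loop(max_iterations: int, scripted_responses: list[dict]):
    """Return a ('finished', text, turns) tuple under the same termination rule."""
    iteration = 0
    while max_iterations < 0 or iteration < max_iterations:
        iteration += 1
        response = scripted_responses[iteration - 1]
        if not response.get("tool_calls"):
            return "finished", response["text"], iteration
        # ...tool execution and conversation update would happen here...
    return "finished", f"completed after {max_iterations} iterations (limit reached)", iteration

result = agent_loop(3, [{"tool_calls": ["t"]}, {"tool_calls": [], "text": "done"}])
assert result == ("finished", "done", 2)
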
    async def run(
        self,
        input_data: Input,
@@ -589,15 +972,19 @@ class SmartDecisionMakerBlock(Block):
        graph_exec_id: str,
        node_exec_id: str,
        user_id: str,
        graph_version: int,
        execution_context: ExecutionContext,
        execution_processor: "ExecutionProcessor",
        **kwargs,
    ) -> BlockOutput:
        tool_functions = await self._create_function_signature(node_id)
        tool_functions = await self._create_tool_node_signatures(node_id)
        yield "tool_functions", json.dumps(tool_functions)

        input_data.conversation_history = input_data.conversation_history or []
        prompt = [json.to_dict(p) for p in input_data.conversation_history if p]
        conversation_history = input_data.conversation_history or []
        prompt = [json.to_dict(p) for p in conversation_history if p]

        pending_tool_calls = get_pending_tool_calls(input_data.conversation_history)
        pending_tool_calls = get_pending_tool_calls(conversation_history)
        if pending_tool_calls and input_data.last_tool_output is None:
            raise ValueError(f"Tool call requires an output for {pending_tool_calls}")

@@ -634,24 +1021,52 @@ class SmartDecisionMakerBlock(Block):
        input_data.prompt = llm.fmt.format_string(input_data.prompt, values)
        input_data.sys_prompt = llm.fmt.format_string(input_data.sys_prompt, values)

        prefix = "[Main Objective Prompt]: "

        if input_data.sys_prompt and not any(
            p["role"] == "system" and p["content"].startswith(prefix) for p in prompt
            p["role"] == "system" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
            for p in prompt
        ):
            prompt.append({"role": "system", "content": prefix + input_data.sys_prompt})
            prompt.append(
                {
                    "role": "system",
                    "content": MAIN_OBJECTIVE_PREFIX + input_data.sys_prompt,
                }
            )

        if input_data.prompt and not any(
            p["role"] == "user" and p["content"].startswith(prefix) for p in prompt
            p["role"] == "user" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
            for p in prompt
        ):
            prompt.append({"role": "user", "content": prefix + input_data.prompt})
            prompt.append(
                {"role": "user", "content": MAIN_OBJECTIVE_PREFIX + input_data.prompt}
            )

        # Execute tools based on the selected mode
        if input_data.agent_mode_max_iterations != 0:
            # In agent mode, execute tools directly in a loop until finished
            async for result in self._execute_tools_agent_mode(
                input_data=input_data,
                credentials=credentials,
                tool_functions=tool_functions,
                prompt=prompt,
                graph_exec_id=graph_exec_id,
                node_id=node_id,
                node_exec_id=node_exec_id,
                user_id=user_id,
                graph_id=graph_id,
                graph_version=graph_version,
                execution_context=execution_context,
                execution_processor=execution_processor,
            ):
                yield result
            return

        # One-off mode: single LLM call and yield tool calls for external execution
        current_prompt = list(prompt)
        max_attempts = max(1, int(input_data.retry))
        response = None

        last_error = None
        for attempt in range(max_attempts):
        for _ in range(max_attempts):
            try:
                response = await self._attempt_llm_call_with_validation(
                    credentials, input_data, current_prompt, tool_functions
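The seeding logic above is idempotent: the objective messages are appended only when no message already starts with `MAIN_OBJECTIVE_PREFIX`, so a preserved conversation history is not re-seeded on later runs. A minimal sketch, assuming the constant keeps the value the removed inline `prefix` had:

MAIN_OBJECTIVE_PREFIX = "[Main Objective Prompt]: "

def seed_objective(prompt: list[dict], role: str, text: str) -> None:
    """Append the objective message once; repeat calls over the same history are no-ops."""
    if text and not any(
        p["role"] == role and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
        for p in prompt
    ):
        prompt.append({"role": role, "content": MAIN_OBJECTIVE_PREFIX + text})

history: list[dict] = []
seed_objective(history, "system", "Be concise.")
seed_objective(history, "system", "Be concise.")  # no duplicate appended
assert len(history) == 1
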
@@ -661,9 +1076,9 @@ class SmartDecisionMakerBlock(Block):
            except ValueError as e:
                last_error = e
                error_feedback = (
                    "Your tool call had parameter errors. Please fix the following issues and try again:\n"
                    "Your tool call had errors. Please fix the following issues and try again:\n"
                    + f"- {str(e)}\n"
                    + "\nPlease make sure to use the exact parameter names as specified in the function schema."
                    + "\nPlease make sure to use the exact tool and parameter names as specified in the function schema."
                )
                current_prompt = list(current_prompt) + [
                    {"role": "user", "content": error_feedback}
@@ -690,21 +1105,23 @@ class SmartDecisionMakerBlock(Block):
                ),
                None,
            )
            if (
                tool_def
                and "function" in tool_def
                and "parameters" in tool_def["function"]
            ):
            if not tool_def:
                # NOTE: This matches the logic in _attempt_llm_call_with_validation and
                # relies on its validation for the assumption that this is valid to use.
                if len(tool_functions) == 1:
                    tool_def = tool_functions[0]
                else:
                    # This should not happen due to prior validation
                    continue

            if "function" in tool_def and "parameters" in tool_def["function"]:
                expected_args = tool_def["function"]["parameters"].get("properties", {})
            else:
                expected_args = {arg: {} for arg in tool_args.keys()}

            # Get field mapping from tool definition
            field_mapping = (
                tool_def.get("function", {}).get("_field_mapping", {})
                if tool_def
                else {}
            )
            # Get the sink node ID and field mapping from tool definition
            field_mapping = tool_def["function"].get("_field_mapping", {})
            sink_node_id = tool_def["function"]["_sink_node_id"]

            for clean_arg_name in expected_args:
                # arg_name is now always the cleaned field name (for Anthropic API compliance)
@@ -712,9 +1129,8 @@ class SmartDecisionMakerBlock(Block):
                original_field_name = field_mapping.get(clean_arg_name, clean_arg_name)
                arg_value = tool_args.get(clean_arg_name)

                sanitized_tool_name = self.cleanup(tool_name)
                sanitized_arg_name = self.cleanup(original_field_name)
                emit_key = f"tools_^_{sanitized_tool_name}_~_{sanitized_arg_name}"
                emit_key = f"tools_^_{sink_node_id}_~_{sanitized_arg_name}"

                logger.debug(
                    "[SmartDecisionMakerBlock|geid:%s|neid:%s] emit %s",

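The one-off retry path above feeds validation errors back to the model on a throwaway copy of the prompt, so corrective messages never leak into the emitted conversation. A compact restatement of that control flow; `llm_call` here is a plain callable standing in for the block's validated call, not its real API:

def call_with_retries(llm_call, prompt: list[dict], max_attempts: int):
    """Retry on validation errors, appending feedback only to a local copy of the prompt."""
    last_error = None
    current_prompt = list(prompt)  # the caller's prompt is never mutated
    for _ in range(max(1, max_attempts)):
        try:
            return llm_call(current_prompt)
        except ValueError as e:
            last_error = e
            current_prompt = current_prompt + [
                {"role": "user", "content": f"Your tool call had errors. Fix and retry: {e}"}
            ]
    raise last_error

responses = iter([ValueError("typo'd parameter name"), "ok"])

def flaky(_prompt):
    item = next(responses)
    if isinstance(item, Exception):
        raise item
    return item

assert call_with_retries(flaky, [], 2) == "ok"  # succeeds on the second attempt
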
@@ -1,17 +1,27 @@
from typing import Type
from typing import Any, Type

import pytest

from backend.data.block import Block, get_blocks
from backend.data.block import Block, BlockSchemaInput, get_blocks
from backend.data.model import SchemaField
from backend.util.test import execute_block_test

SKIP_BLOCK_TESTS = {
    "HumanInTheLoopBlock",
}

@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b.name)
@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b().name)
async def test_available_blocks(block: Type[Block]):
    await execute_block_test(block())
    block_instance = block()
    if block_instance.__class__.__name__ in SKIP_BLOCK_TESTS:
        pytest.skip(
            f"Skipping {block_instance.__class__.__name__} - requires external service"
        )
    await execute_block_test(block_instance)


@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b.name)
@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b().name)
async def test_block_ids_valid(block: Type[Block]):
    # add the tests here to check they are uuid4
    import uuid
@@ -123,3 +133,148 @@ async def test_block_ids_valid(block: Type[Block]):
        ), f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
    except ValueError:
        pytest.fail(f"Block {block.name} has invalid UUID format: {block_instance.id}")

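The tail of the hunk above belongs to the UUID check that every block ID must pass. Restated compactly (a sketch of the assertion logic, not the test itself):

import uuid

def is_uuid4(value: str) -> bool:
    """Parseable as a UUID and specifically version 4, as the test asserts."""
    try:
        return uuid.UUID(value).version == 4
    except ValueError:
        return False

assert is_uuid4(str(uuid.uuid4()))
assert not is_uuid4("not-a-uuid")
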
class TestAutoCredentialsFieldsValidation:
    """Tests for auto_credentials field validation in BlockSchema."""

    def test_duplicate_auto_credentials_kwarg_name_raises_error(self):
        """Test that duplicate kwarg_name in auto_credentials raises ValueError."""

        class DuplicateKwargSchema(BlockSchemaInput):
            """Schema with duplicate auto_credentials kwarg_name."""

            # Both fields explicitly use the same kwarg_name "credentials"
            file1: dict[str, Any] | None = SchemaField(
                description="First file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "credentials",
                    }
                },
            )
            file2: dict[str, Any] | None = SchemaField(
                description="Second file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "credentials",  # Duplicate kwarg_name!
                    }
                },
            )

        with pytest.raises(ValueError) as exc_info:
            DuplicateKwargSchema.get_auto_credentials_fields()

        error_message = str(exc_info.value)
        assert "Duplicate auto_credentials kwarg_name 'credentials'" in error_message
        assert "file1" in error_message
        assert "file2" in error_message

    def test_unique_auto_credentials_kwarg_names_succeed(self):
        """Test that unique kwarg_name values work correctly."""

        class UniqueKwargSchema(BlockSchemaInput):
            """Schema with unique auto_credentials kwarg_name values."""

            file1: dict[str, Any] | None = SchemaField(
                description="First file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "file1_credentials",
                    }
                },
            )
            file2: dict[str, Any] | None = SchemaField(
                description="Second file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "file2_credentials",  # Different kwarg_name
                    }
                },
            )

        # Should not raise
        result = UniqueKwargSchema.get_auto_credentials_fields()

        assert "file1_credentials" in result
        assert "file2_credentials" in result
        assert result["file1_credentials"]["field_name"] == "file1"
        assert result["file2_credentials"]["field_name"] == "file2"

    def test_default_kwarg_name_is_credentials(self):
        """Test that missing kwarg_name defaults to 'credentials'."""

        class DefaultKwargSchema(BlockSchemaInput):
            """Schema with auto_credentials missing kwarg_name."""

            file: dict[str, Any] | None = SchemaField(
                description="File input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        # No kwarg_name specified - should default to "credentials"
                    }
                },
            )

        result = DefaultKwargSchema.get_auto_credentials_fields()

        assert "credentials" in result
        assert result["credentials"]["field_name"] == "file"

    def test_duplicate_default_kwarg_name_raises_error(self):
        """Test that two fields with default kwarg_name raises ValueError."""

        class DefaultDuplicateSchema(BlockSchemaInput):
            """Schema where both fields omit kwarg_name, defaulting to 'credentials'."""

            file1: dict[str, Any] | None = SchemaField(
                description="First file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        # No kwarg_name - defaults to "credentials"
                    }
                },
            )
            file2: dict[str, Any] | None = SchemaField(
                description="Second file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        # No kwarg_name - also defaults to "credentials"
                    }
                },
            )

        with pytest.raises(ValueError) as exc_info:
            DefaultDuplicateSchema.get_auto_credentials_fields()

        assert "Duplicate auto_credentials kwarg_name 'credentials'" in str(
            exc_info.value
        )

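Together these tests pin down the contract: each auto_credentials entry resolves to a kwarg_name (defaulting to "credentials"), and two fields may not resolve to the same name. A hypothetical reimplementation of that duplicate check, under the assumption that the real get_auto_credentials_fields walks the schema's fields in a similar way:

def collect_auto_credentials(field_configs: dict[str, dict]) -> dict[str, dict]:
    # field_configs maps field name -> its "auto_credentials" dict (assumed input shape).
    result: dict[str, dict] = {}
    for field_name, config in field_configs.items():
        kwarg_name = config.get("kwarg_name", "credentials")  # default per the tests
        if kwarg_name in result:
            raise ValueError(
                f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
                f"used by fields '{result[kwarg_name]['field_name']}' and '{field_name}'"
            )
        result[kwarg_name] = {"field_name": field_name, **config}
    return result

collect_auto_credentials({"file1": {"kwarg_name": "a"}, "file2": {"kwarg_name": "b"}})  # ok
try:
    collect_auto_credentials({"file1": {}, "file2": {}})  # both default to "credentials"
except ValueError as e:
    assert "Duplicate auto_credentials kwarg_name 'credentials'" in str(e)
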
@@ -365,37 +365,22 @@ class TestLLMStatsTracking:
        assert outputs["response"] == "AI response to conversation"

    @pytest.mark.asyncio
    async def test_ai_list_generator_with_retries(self):
        """Test that AIListGeneratorBlock correctly tracks stats with retries."""
    async def test_ai_list_generator_basic_functionality(self):
        """Test that AIListGeneratorBlock correctly works with structured responses."""
        import backend.blocks.llm as llm

        block = llm.AIListGeneratorBlock()

        # Counter to track calls
        call_count = 0

        # Mock the llm_call to return a structured response
        async def mock_llm_call(input_data, credentials):
            nonlocal call_count
            call_count += 1

            # Update stats
            if hasattr(block, "execution_stats") and block.execution_stats:
                block.execution_stats.input_token_count += 40
                block.execution_stats.output_token_count += 20
                block.execution_stats.llm_call_count += 1
            else:
                block.execution_stats = NodeExecutionStats(
                    input_token_count=40,
                    output_token_count=20,
                    llm_call_count=1,
                )

            if call_count == 1:
                # First call returns invalid format
                return {"response": "not a valid list"}
            else:
                # Second call returns valid list
                return {"response": "['item1', 'item2', 'item3']"}
            # Update stats to simulate LLM call
            block.execution_stats = NodeExecutionStats(
                input_token_count=50,
                output_token_count=30,
                llm_call_count=1,
            )
            # Return a structured response with the expected format
            return {"list": ["item1", "item2", "item3"]}

        block.llm_call = mock_llm_call  # type: ignore

@@ -413,14 +398,20 @@ class TestLLMStatsTracking:
        ):
            outputs[output_name] = output_data

        # Check stats - should have 2 calls
        assert call_count == 2
        assert block.execution_stats.input_token_count == 80  # 40 * 2
        assert block.execution_stats.output_token_count == 40  # 20 * 2
        assert block.execution_stats.llm_call_count == 2
        # Check stats
        assert block.execution_stats.input_token_count == 50
        assert block.execution_stats.output_token_count == 30
        assert block.execution_stats.llm_call_count == 1

        # Check output
        assert outputs["generated_list"] == ["item1", "item2", "item3"]
        # Check that individual items were yielded
        # Note: outputs dict will only contain the last value for each key
        # So we need to check that the list_item output exists
        assert "list_item" in outputs
        # The list_item output should be the last item in the list
        assert outputs["list_item"] == "item3"
        assert "prompt" in outputs

    @pytest.mark.asyncio
    async def test_merge_llm_stats(self):

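The "last value wins" caveat noted in those assertions applies to any test that folds a block's yielded outputs into a dict. A small self-contained demonstration of that folding pattern:

import asyncio

async def fake_block_run():
    # Stand-in for a block's async output generator.
    yield "list_item", "item1"
    yield "list_item", "item3"  # same key: overwrites the earlier value
    yield "generated_list", ["item1", "item2", "item3"]

async def collect(gen) -> dict:
    outputs = {}
    async for name, value in gen:
        outputs[name] = value
    return outputs

outputs = asyncio.run(collect(fake_block_run()))
assert outputs["list_item"] == "item3"  # only the last yield per key survives
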
@@ -1,7 +1,11 @@
import logging
import threading
from collections import defaultdict
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.data.execution import ExecutionContext
from backend.data.model import ProviderName, User
from backend.server.model import CreateGraph
from backend.server.rest_api import AgentServer
@@ -17,10 +21,10 @@ async def create_graph(s: SpinTestServer, g, u: User):


async def create_credentials(s: SpinTestServer, u: User):
    import backend.blocks.llm as llm
    import backend.blocks.llm as llm_module

    provider = ProviderName.OPENAI
    credentials = llm.TEST_CREDENTIALS
    credentials = llm_module.TEST_CREDENTIALS
    return await s.agent_server.test_create_credentials(u.id, provider, credentials)


@@ -165,7 +169,7 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
    )
    test_graph = await create_graph(server, test_graph, test_user)

    tool_functions = await SmartDecisionMakerBlock._create_function_signature(
    tool_functions = await SmartDecisionMakerBlock._create_tool_node_signatures(
        test_graph.nodes[0].id
    )
    assert tool_functions is not None, "Tool functions should not be None"
@@ -196,8 +200,6 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
@pytest.mark.asyncio
async def test_smart_decision_maker_tracks_llm_stats():
    """Test that SmartDecisionMakerBlock correctly tracks LLM usage stats."""
    from unittest.mock import MagicMock, patch

    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

@@ -215,8 +217,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
        "content": "I need to think about this.",
    }

    # Mock the _create_function_signature method to avoid database calls
    from unittest.mock import AsyncMock
    # Mock the _create_tool_node_signatures method to avoid database calls

    with patch(
        "backend.blocks.llm.llm_call",
@@ -224,7 +225,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
        return_value=mock_response,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=[],
    ):
@@ -234,10 +235,19 @@ async def test_smart_decision_maker_tracks_llm_stats():
            prompt="Should I continue with this task?",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Execute the block
        outputs = {}
        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -246,6 +256,9 @@ async def test_smart_decision_maker_tracks_llm_stats():
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

@@ -263,8 +276,6 @@ async def test_smart_decision_maker_tracks_llm_stats():
@pytest.mark.asyncio
async def test_smart_decision_maker_parameter_validation():
    """Test that SmartDecisionMakerBlock correctly validates tool call parameters."""
    from unittest.mock import MagicMock, patch

    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

@@ -293,6 +304,7 @@ async def test_smart_decision_maker_parameter_validation():
                },
                "required": ["query", "max_keyword_difficulty"],
            },
            "_sink_node_id": "test-sink-node-id",
        },
    }
]
@@ -310,15 +322,13 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_with_typo.reasoning = None
    mock_response_with_typo.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_with_typo,
    ) as mock_llm_call, patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):
@@ -328,8 +338,17 @@ async def test_smart_decision_maker_parameter_validation():
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            retry=2,  # Set retry to 2 for testing
            agent_mode_max_iterations=0,
        )

        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        # Should raise ValueError after retries due to typo'd parameter name
        with pytest.raises(ValueError) as exc_info:
            outputs = {}
@@ -341,6 +360,9 @@ async def test_smart_decision_maker_parameter_validation():
                graph_exec_id="test-exec-id",
                node_exec_id="test-node-exec-id",
                user_id="test-user-id",
                graph_version=1,
                execution_context=mock_execution_context,
                execution_processor=mock_execution_processor,
            ):
                outputs[output_name] = output_data

@@ -367,15 +389,13 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_missing_required.reasoning = None
    mock_response_missing_required.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_missing_required,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):
@@ -384,8 +404,17 @@ async def test_smart_decision_maker_parameter_validation():
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        # Should raise ValueError due to missing required parameter
        with pytest.raises(ValueError) as exc_info:
            outputs = {}
@@ -397,6 +426,9 @@ async def test_smart_decision_maker_parameter_validation():
                graph_exec_id="test-exec-id",
                node_exec_id="test-node-exec-id",
                user_id="test-user-id",
                graph_version=1,
                execution_context=mock_execution_context,
                execution_processor=mock_execution_processor,
            ):
                outputs[output_name] = output_data

@@ -417,15 +449,13 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_valid.reasoning = None
    mock_response_valid.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_valid,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):
@@ -434,10 +464,19 @@ async def test_smart_decision_maker_parameter_validation():
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Should succeed - optional parameter missing is OK
        outputs = {}
        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -446,17 +485,20 @@ async def test_smart_decision_maker_parameter_validation():
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        # Verify tool outputs were generated correctly
        assert "tools_^_search_keywords_~_query" in outputs
        assert outputs["tools_^_search_keywords_~_query"] == "test"
        assert "tools_^_search_keywords_~_max_keyword_difficulty" in outputs
        assert outputs["tools_^_search_keywords_~_max_keyword_difficulty"] == 50
        assert "tools_^_test-sink-node-id_~_query" in outputs
        assert outputs["tools_^_test-sink-node-id_~_query"] == "test"
        assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs
        assert outputs["tools_^_test-sink-node-id_~_max_keyword_difficulty"] == 50
        # Optional parameter should be None when not provided
        assert "tools_^_search_keywords_~_optional_param" in outputs
        assert outputs["tools_^_search_keywords_~_optional_param"] is None
        assert "tools_^_test-sink-node-id_~_optional_param" in outputs
        assert outputs["tools_^_test-sink-node-id_~_optional_param"] is None

        # Test case 4: Valid tool call with ALL parameters (should succeed)
        mock_tool_call_all_params = MagicMock()
@@ -471,15 +513,13 @@ async def test_smart_decision_maker_parameter_validation():
    mock_response_all_params.reasoning = None
    mock_response_all_params.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_all_params,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):
@@ -488,10 +528,19 @@ async def test_smart_decision_maker_parameter_validation():
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Should succeed with all parameters
        outputs = {}
        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -500,20 +549,21 @@ async def test_smart_decision_maker_parameter_validation():
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        # Verify all tool outputs were generated correctly
        assert outputs["tools_^_search_keywords_~_query"] == "test"
        assert outputs["tools_^_search_keywords_~_max_keyword_difficulty"] == 50
        assert outputs["tools_^_search_keywords_~_optional_param"] == "custom_value"
        assert outputs["tools_^_test-sink-node-id_~_query"] == "test"
        assert outputs["tools_^_test-sink-node-id_~_max_keyword_difficulty"] == 50
        assert outputs["tools_^_test-sink-node-id_~_optional_param"] == "custom_value"


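The assertions above encode the new output-key scheme: keys are built from the sink node ID rather than a sanitized tool name, so downstream wiring survives tool renames. The format string, taken directly from this diff:

def emit_key(sink_node_id: str, sanitized_arg_name: str) -> str:
    return f"tools_^_{sink_node_id}_~_{sanitized_arg_name}"

assert emit_key("test-sink-node-id", "query") == "tools_^_test-sink-node-id_~_query"
assert (
    emit_key("test-sink-node-id", "max_keyword_difficulty")
    == "tools_^_test-sink-node-id_~_max_keyword_difficulty"
)
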
@pytest.mark.asyncio
async def test_smart_decision_maker_raw_response_conversion():
    """Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism."""
    from unittest.mock import MagicMock, patch

    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

@@ -530,6 +580,7 @@ async def test_smart_decision_maker_raw_response_conversion():
                "properties": {"param": {"type": "string"}},
                "required": ["param"],
            },
            "_sink_node_id": "test-sink-node-id",
        },
    }
]
@@ -582,13 +633,12 @@ async def test_smart_decision_maker_raw_response_conversion():
    )

    # Mock llm_call to return different responses on different calls
    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", new_callable=AsyncMock
    ) as mock_llm_call, patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=mock_tool_functions,
    ):
@@ -601,10 +651,19 @@ async def test_smart_decision_maker_raw_response_conversion():
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            retry=2,
            agent_mode_max_iterations=0,
        )

        # Should succeed after retry, demonstrating our helper function works
        outputs = {}
        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -613,12 +672,15 @@ async def test_smart_decision_maker_raw_response_conversion():
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        # Verify the tool output was generated successfully
        assert "tools_^_test_tool_~_param" in outputs
        assert outputs["tools_^_test_tool_~_param"] == "test_value"
        assert "tools_^_test-sink-node-id_~_param" in outputs
        assert outputs["tools_^_test-sink-node-id_~_param"] == "test_value"

        # Verify conversation history was properly maintained
        assert "conversations" in outputs
@@ -648,15 +710,13 @@ async def test_smart_decision_maker_raw_response_conversion():
        "I'll help you with that."  # Ollama returns string
    )

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_ollama,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=[],  # No tools for this test
    ):
@@ -664,9 +724,18 @@ async def test_smart_decision_maker_raw_response_conversion():
            prompt="Simple prompt",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        outputs = {}
        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -675,6 +744,9 @@ async def test_smart_decision_maker_raw_response_conversion():
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

@@ -694,15 +766,13 @@ async def test_smart_decision_maker_raw_response_conversion():
        "content": "Test response",
    }  # Dict format

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response_dict,
    ), patch.object(
        SmartDecisionMakerBlock,
        "_create_function_signature",
        "_create_tool_node_signatures",
        new_callable=AsyncMock,
        return_value=[],
    ):
@@ -710,6 +780,160 @@ async def test_smart_decision_maker_raw_response_conversion():
            prompt="Another test",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        outputs = {}
        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
            graph_id="test-graph-id",
            node_id="test-node-id",
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        assert "finished" in outputs
        assert outputs["finished"] == "Test response"


@pytest.mark.asyncio
async def test_smart_decision_maker_agent_mode():
    """Test that agent mode executes tools directly and loops until finished."""
    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

    block = SmartDecisionMakerBlock()

    # Mock tool call that requires multiple iterations
    mock_tool_call_1 = MagicMock()
    mock_tool_call_1.id = "call_1"
    mock_tool_call_1.function.name = "search_keywords"
    mock_tool_call_1.function.arguments = (
        '{"query": "test", "max_keyword_difficulty": 50}'
    )

    mock_response_1 = MagicMock()
    mock_response_1.response = None
    mock_response_1.tool_calls = [mock_tool_call_1]
    mock_response_1.prompt_tokens = 50
    mock_response_1.completion_tokens = 25
    mock_response_1.reasoning = "Using search tool"
    mock_response_1.raw_response = {
        "role": "assistant",
        "content": None,
        "tool_calls": [{"id": "call_1", "type": "function"}],
    }

    # Final response with no tool calls (finished)
    mock_response_2 = MagicMock()
    mock_response_2.response = "Task completed successfully"
    mock_response_2.tool_calls = []
    mock_response_2.prompt_tokens = 30
    mock_response_2.completion_tokens = 15
    mock_response_2.reasoning = None
    mock_response_2.raw_response = {
        "role": "assistant",
        "content": "Task completed successfully",
    }

    # Mock the LLM call to return different responses on each iteration
    llm_call_mock = AsyncMock()
    llm_call_mock.side_effect = [mock_response_1, mock_response_2]

    # Mock tool node signatures
    mock_tool_signatures = [
        {
            "type": "function",
            "function": {
                "name": "search_keywords",
                "_sink_node_id": "test-sink-node-id",
                "_field_mapping": {},
                "parameters": {
                    "properties": {
                        "query": {"type": "string"},
                        "max_keyword_difficulty": {"type": "integer"},
                    },
                    "required": ["query", "max_keyword_difficulty"],
                },
            },
        }
    ]

    # Mock database and execution components
    mock_db_client = AsyncMock()
    mock_node = MagicMock()
    mock_node.block_id = "test-block-id"
    mock_db_client.get_node.return_value = mock_node

    # Mock upsert_execution_input to return proper NodeExecutionResult and input data
    mock_node_exec_result = MagicMock()
    mock_node_exec_result.node_exec_id = "test-tool-exec-id"
    mock_input_data = {"query": "test", "max_keyword_difficulty": 50}
    mock_db_client.upsert_execution_input.return_value = (
        mock_node_exec_result,
        mock_input_data,
    )

    # No longer need mock_execute_node since we use execution_processor.on_node_execution

    with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object(
        block, "_create_tool_node_signatures", return_value=mock_tool_signatures
    ), patch(
        "backend.blocks.smart_decision_maker.get_database_manager_async_client",
        return_value=mock_db_client,
    ), patch(
        "backend.executor.manager.async_update_node_execution_status",
        new_callable=AsyncMock,
    ), patch(
        "backend.integrations.creds_manager.IntegrationCredentialsManager"
    ):
        # Create a mock execution context
        mock_execution_context = ExecutionContext(
            safe_mode=False,
        )

        # Create a mock execution processor for agent mode tests
        mock_execution_processor = AsyncMock()
        # Configure the execution processor mock with required attributes
        mock_execution_processor.running_node_execution = defaultdict(MagicMock)
        mock_execution_processor.execution_stats = MagicMock()
        mock_execution_processor.execution_stats_lock = threading.Lock()

        # Mock the on_node_execution method to return successful stats
        mock_node_stats = MagicMock()
        mock_node_stats.error = None  # No error
        mock_execution_processor.on_node_execution = AsyncMock(
            return_value=mock_node_stats
        )

        # Mock the get_execution_outputs_by_node_exec_id method
        mock_db_client.get_execution_outputs_by_node_exec_id.return_value = {
            "result": {"status": "success", "data": "search completed"}
        }

        # Test agent mode with max_iterations = 3
        input_data = SmartDecisionMakerBlock.Input(
            prompt="Complete this task using tools",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=3,  # Enable agent mode with 3 max iterations
        )

        outputs = {}
@@ -721,8 +945,115 @@ async def test_smart_decision_maker_raw_response_conversion():
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        # Verify agent mode behavior
        assert "tool_functions" in outputs  # tool_functions is yielded in both modes
        assert "finished" in outputs
        assert outputs["finished"] == "Test response"
        assert outputs["finished"] == "Task completed successfully"
        assert "conversations" in outputs

        # Verify the conversation includes tool responses
        conversations = outputs["conversations"]
        assert len(conversations) > 2  # Should have multiple conversation entries

        # Verify LLM was called twice (once for tool call, once for finish)
        assert llm_call_mock.call_count == 2

        # Verify tool was executed via execution processor
        assert mock_execution_processor.on_node_execution.call_count == 1


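The two-turn script above relies on AsyncMock's list-valued side_effect, which hands back the next item on each await; that is what makes iteration one return a tool call and iteration two a plain answer. The same mechanism in isolation:

import asyncio
from unittest.mock import AsyncMock

llm = AsyncMock(side_effect=["tool-call-turn", "final-answer"])

async def demo():
    assert await llm() == "tool-call-turn"
    assert await llm() == "final-answer"
    assert llm.call_count == 2

asyncio.run(demo())
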
@pytest.mark.asyncio
async def test_smart_decision_maker_traditional_mode_default():
    """Test that default behavior (agent_mode_max_iterations=0) works as traditional mode."""
    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

    block = SmartDecisionMakerBlock()

    # Mock tool call
    mock_tool_call = MagicMock()
    mock_tool_call.function.name = "search_keywords"
    mock_tool_call.function.arguments = (
        '{"query": "test", "max_keyword_difficulty": 50}'
    )

    mock_response = MagicMock()
    mock_response.response = None
    mock_response.tool_calls = [mock_tool_call]
    mock_response.prompt_tokens = 50
    mock_response.completion_tokens = 25
    mock_response.reasoning = None
    mock_response.raw_response = {"role": "assistant", "content": None}

    mock_tool_signatures = [
        {
            "type": "function",
            "function": {
                "name": "search_keywords",
                "_sink_node_id": "test-sink-node-id",
                "_field_mapping": {},
                "parameters": {
                    "properties": {
                        "query": {"type": "string"},
                        "max_keyword_difficulty": {"type": "integer"},
                    },
                    "required": ["query", "max_keyword_difficulty"],
                },
            },
        }
    ]

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response,
    ), patch.object(
        block, "_create_tool_node_signatures", return_value=mock_tool_signatures
    ):
        # Test default behavior (traditional mode)
        input_data = SmartDecisionMakerBlock.Input(
            prompt="Test prompt",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,  # Traditional mode
        )

        # Create execution context
        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests
        mock_execution_processor = MagicMock()

        outputs = {}
        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
            graph_id="test-graph-id",
            node_id="test-node-id",
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        # Verify traditional mode behavior
        assert (
            "tool_functions" in outputs
        )  # Should yield tool_functions in traditional mode
        assert (
            "tools_^_test-sink-node-id_~_query" in outputs
        )  # Should yield individual tool parameters
        assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs
        assert "conversations" in outputs

@@ -1,7 +1,7 @@
"""Comprehensive tests for SmartDecisionMakerBlock dynamic field handling."""

import json
from unittest.mock import AsyncMock, Mock, patch
from unittest.mock import AsyncMock, MagicMock, Mock, patch

import pytest

@@ -192,7 +192,7 @@ async def test_create_block_function_signature_with_object_fields():


@pytest.mark.asyncio
async def test_create_function_signature():
async def test_create_tool_node_signatures():
    """Test that the mapping between sanitized and original field names is built correctly."""
    block = SmartDecisionMakerBlock()

@@ -241,7 +241,7 @@ async def test_create_function_signature():
    ]

    # Call the method that builds signatures
    tool_functions = await block._create_function_signature("test_node_id")
    tool_functions = await block._create_tool_node_signatures("test_node_id")

    # Verify we got 2 tool functions (one for dict, one for list)
    assert len(tool_functions) == 2
@@ -308,10 +308,47 @@ async def test_output_yielding_with_dynamic_fields():
    ) as mock_llm:
        mock_llm.return_value = mock_response

        # Mock the function signature creation
        with patch.object(
            block, "_create_function_signature", new_callable=AsyncMock
        # Mock the database manager to avoid HTTP calls during tool execution
        with patch(
            "backend.blocks.smart_decision_maker.get_database_manager_async_client"
        ) as mock_db_manager, patch.object(
            block, "_create_tool_node_signatures", new_callable=AsyncMock
        ) as mock_sig:
            # Set up the mock database manager
            mock_db_client = AsyncMock()
            mock_db_manager.return_value = mock_db_client

            # Mock the node retrieval
            mock_target_node = Mock()
            mock_target_node.id = "test-sink-node-id"
            mock_target_node.block_id = "CreateDictionaryBlock"
            mock_target_node.block = Mock()
            mock_target_node.block.name = "Create Dictionary"
            mock_db_client.get_node.return_value = mock_target_node

            # Mock the execution result creation
            mock_node_exec_result = Mock()
            mock_node_exec_result.node_exec_id = "mock-node-exec-id"
            mock_final_input_data = {
                "values_#_name": "Alice",
                "values_#_age": 30,
                "values_#_email": "alice@example.com",
            }
            mock_db_client.upsert_execution_input.return_value = (
                mock_node_exec_result,
                mock_final_input_data,
            )

            # Mock the output retrieval
            mock_outputs = {
                "values_#_name": "Alice",
                "values_#_age": 30,
                "values_#_email": "alice@example.com",
            }
            mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
                mock_outputs
            )

            mock_sig.return_value = [
                {
                    "type": "function",
@@ -325,6 +362,7 @@ async def test_output_yielding_with_dynamic_fields():
                            "values___email": {"type": "string"},
                        },
                    },
                    "_sink_node_id": "test-sink-node-id",
                },
            }
            ]
@@ -336,10 +374,16 @@ async def test_output_yielding_with_dynamic_fields():
                prompt="Create a user dictionary",
                credentials=llm.TEST_CREDENTIALS_INPUT,
                model=llm.LlmModel.GPT4O,
                agent_mode_max_iterations=0,  # Use traditional mode to test output yielding
            )

            # Run the block
            outputs = {}
            from backend.data.execution import ExecutionContext

            mock_execution_context = ExecutionContext(safe_mode=False)
            mock_execution_processor = MagicMock()

            async for output_name, output_value in block.run(
                input_data,
                credentials=llm.TEST_CREDENTIALS,
@@ -348,19 +392,22 @@ async def test_output_yielding_with_dynamic_fields():
                graph_exec_id="test_exec",
                node_exec_id="test_node_exec",
                user_id="test_user",
                graph_version=1,
                execution_context=mock_execution_context,
                execution_processor=mock_execution_processor,
            ):
                outputs[output_name] = output_value

            # Verify the outputs use sanitized field names (matching frontend normalizeToolName)
            assert "tools_^_createdictionaryblock_~_values___name" in outputs
            assert outputs["tools_^_createdictionaryblock_~_values___name"] == "Alice"
            # Verify the outputs use sink node ID in output keys
            assert "tools_^_test-sink-node-id_~_values___name" in outputs
            assert outputs["tools_^_test-sink-node-id_~_values___name"] == "Alice"

            assert "tools_^_createdictionaryblock_~_values___age" in outputs
            assert outputs["tools_^_createdictionaryblock_~_values___age"] == 30
            assert "tools_^_test-sink-node-id_~_values___age" in outputs
            assert outputs["tools_^_test-sink-node-id_~_values___age"] == 30

            assert "tools_^_createdictionaryblock_~_values___email" in outputs
            assert "tools_^_test-sink-node-id_~_values___email" in outputs
            assert (
                outputs["tools_^_createdictionaryblock_~_values___email"]
                outputs["tools_^_test-sink-node-id_~_values___email"]
                == "alice@example.com"
            )

@@ -488,7 +535,7 @@ async def test_validation_errors_dont_pollute_conversation():

    # Mock the function signature creation
    with patch.object(
        block, "_create_function_signature", new_callable=AsyncMock
        block, "_create_tool_node_signatures", new_callable=AsyncMock
    ) as mock_sig:
        mock_sig.return_value = [
            {
@@ -505,49 +552,113 @@ async def test_validation_errors_dont_pollute_conversation():
                    },
                    "required": ["correct_param"],
                },
                "_sink_node_id": "test-sink-node-id",
            },
        }
        ]

        # Create input data
        from backend.blocks import llm
        # Mock the database manager to avoid HTTP calls during tool execution
        with patch(
            "backend.blocks.smart_decision_maker.get_database_manager_async_client"
        ) as mock_db_manager:
            # Set up the mock database manager for agent mode
            mock_db_client = AsyncMock()
            mock_db_manager.return_value = mock_db_client

        input_data = block.input_schema(
            prompt="Test prompt",
            credentials=llm.TEST_CREDENTIALS_INPUT,
            model=llm.LlmModel.GPT4O,
            retry=3,  # Allow retries
        )
            # Mock the node retrieval
            mock_target_node = Mock()
            mock_target_node.id = "test-sink-node-id"
            mock_target_node.block_id = "TestBlock"
            mock_target_node.block = Mock()
            mock_target_node.block.name = "Test Block"
            mock_db_client.get_node.return_value = mock_target_node

        # Run the block
        outputs = {}
        async for output_name, output_value in block.run(
            input_data,
            credentials=llm.TEST_CREDENTIALS,
            graph_id="test_graph",
            node_id="test_node",
            graph_exec_id="test_exec",
            node_exec_id="test_node_exec",
            user_id="test_user",
        ):
            outputs[output_name] = output_value
            # Mock the execution result creation
            mock_node_exec_result = Mock()
            mock_node_exec_result.node_exec_id = "mock-node-exec-id"
            mock_final_input_data = {"correct_param": "value"}
            mock_db_client.upsert_execution_input.return_value = (
                mock_node_exec_result,
                mock_final_input_data,
            )

        # Verify we had 2 LLM calls (initial + retry)
        assert call_count == 2
            # Mock the output retrieval
            mock_outputs = {"correct_param": "value"}
            mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
                mock_outputs
            )

        # Check the final conversation output
        final_conversation = outputs.get("conversations", [])
            # Create input data
            from backend.blocks import llm

        # The final conversation should NOT contain the validation error message
        error_messages = [
            msg
            for msg in final_conversation
            if msg.get("role") == "user"
            and "parameter errors" in msg.get("content", "")
        ]
        assert (
            len(error_messages) == 0
        ), "Validation error leaked into final conversation"
            input_data = block.input_schema(
                prompt="Test prompt",
                credentials=llm.TEST_CREDENTIALS_INPUT,
                model=llm.LlmModel.GPT4O,
                retry=3,  # Allow retries
                agent_mode_max_iterations=1,
            )

        # The final conversation should only have the successful response
        assert final_conversation[-1]["content"] == "valid"
            # Run the block
            outputs = {}
            from backend.data.execution import ExecutionContext

            mock_execution_context = ExecutionContext(safe_mode=False)

            # Create a proper mock execution processor for agent mode
            from collections import defaultdict

            mock_execution_processor = AsyncMock()
            mock_execution_processor.execution_stats = MagicMock()
            mock_execution_processor.execution_stats_lock = MagicMock()

            # Create a mock NodeExecutionProgress for the sink node
            mock_node_exec_progress = MagicMock()
            mock_node_exec_progress.add_task = MagicMock()
            mock_node_exec_progress.pop_output = MagicMock(
                return_value=None
            )  # No outputs to process

            # Set up running_node_execution as a defaultdict that returns our mock for any key
            mock_execution_processor.running_node_execution = defaultdict(
                lambda: mock_node_exec_progress
            )

            # Mock the on_node_execution method that gets called during tool execution
            mock_node_stats = MagicMock()
            mock_node_stats.error = None
            mock_execution_processor.on_node_execution.return_value = (
                mock_node_stats
            )

            async for output_name, output_value in block.run(
                input_data,
                credentials=llm.TEST_CREDENTIALS,
                graph_id="test_graph",
                node_id="test_node",
                graph_exec_id="test_exec",
                node_exec_id="test_node_exec",
                user_id="test_user",
                graph_version=1,
                execution_context=mock_execution_context,
                execution_processor=mock_execution_processor,
            ):
                outputs[output_name] = output_value

            # Verify we had at least 1 LLM call
            assert call_count >= 1

            # Check the final conversation output
            final_conversation = outputs.get("conversations", [])

            # The final conversation should NOT contain validation error messages
            # Even if retries don't happen in agent mode, we should not leak errors
            error_messages = [
                msg
                for msg in final_conversation
                if msg.get("role") == "user"
                and "parameter errors" in msg.get("content", "")
            ]
            assert (
                len(error_messages) == 0
            ), "Validation error leaked into final conversation"

@@ -14,7 +14,7 @@ from backend.data.block import (
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
)
|
||||
from backend.data.execution import UserContext
|
||||
from backend.data.execution import ExecutionContext
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
# Shared timezone literal type for all time/date blocks
|
||||
@@ -188,10 +188,9 @@ class GetCurrentTimeBlock(Block):
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, user_context: UserContext, **kwargs
|
||||
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
|
||||
) -> BlockOutput:
|
||||
# Extract timezone from user_context (always present)
|
||||
effective_timezone = user_context.timezone
|
||||
effective_timezone = execution_context.user_timezone
|
||||
|
||||
# Get the appropriate timezone
|
||||
tz = _get_timezone(input_data.format_type, effective_timezone)
|
||||
@@ -298,10 +297,10 @@ class GetCurrentDateBlock(Block):
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
# Extract timezone from user_context (required keyword argument)
|
||||
user_context: UserContext = kwargs["user_context"]
|
||||
effective_timezone = user_context.timezone
|
||||
async def run(
|
||||
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
|
||||
) -> BlockOutput:
|
||||
effective_timezone = execution_context.user_timezone
|
||||
|
||||
try:
|
||||
offset = int(input_data.offset)
|
||||
@@ -404,10 +403,10 @@ class GetCurrentDateAndTimeBlock(Block):
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
# Extract timezone from user_context (required keyword argument)
|
||||
user_context: UserContext = kwargs["user_context"]
|
||||
effective_timezone = user_context.timezone
|
||||
async def run(
|
||||
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
|
||||
) -> BlockOutput:
|
||||
effective_timezone = execution_context.user_timezone
|
||||
|
||||
# Get the appropriate timezone
|
||||
tz = _get_timezone(input_data.format_type, effective_timezone)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from datetime import datetime
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Any, Dict
|
||||
|
||||
from backend.blocks.twitter._mappers import (
|
||||
@@ -237,6 +237,12 @@ class TweetDurationBuilder:
|
||||
|
||||
def add_start_time(self, start_time: datetime | None):
|
||||
if start_time:
|
||||
# Twitter API requires start_time to be at least 10 seconds before now
|
||||
max_start_time = datetime.now(timezone.utc) - timedelta(seconds=10)
|
||||
if start_time.tzinfo is None:
|
||||
start_time = start_time.replace(tzinfo=timezone.utc)
|
||||
if start_time > max_start_time:
|
||||
start_time = max_start_time
|
||||
self.params["start_time"] = start_time
|
||||
return self
|
||||
|
||||
|
||||
@@ -51,8 +51,10 @@ class ResponseDataSerializer(BaseSerializer):
|
||||
return serialized_item
|
||||
|
||||
@classmethod
|
||||
def serialize_list(cls, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
def serialize_list(cls, data: List[Dict[str, Any]] | None) -> List[Dict[str, Any]]:
|
||||
"""Serializes a list of dictionary items"""
|
||||
if not data:
|
||||
return []
|
||||
return [cls.serialize_dict(item) for item in data]
|
||||
|
||||
|
||||
|
||||
@@ -408,7 +408,7 @@ class ListExpansionInputs(BlockSchemaInput):
|
||||
|
||||
class TweetTimeWindowInputs(BlockSchemaInput):
|
||||
start_time: datetime | None = SchemaField(
|
||||
description="Start time in YYYY-MM-DDTHH:mm:ssZ format",
|
||||
description="Start time in YYYY-MM-DDTHH:mm:ssZ format. If set to a time less than 10 seconds ago, it will be automatically adjusted to 10 seconds ago (Twitter API requirement).",
|
||||
placeholder="Enter start time",
|
||||
default=None,
|
||||
advanced=False,
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
import logging
|
||||
from typing import Literal
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
from pydantic import SecretStr
|
||||
from youtube_transcript_api._api import YouTubeTranscriptApi
|
||||
from youtube_transcript_api._errors import NoTranscriptFound
|
||||
from youtube_transcript_api._transcripts import FetchedTranscript
|
||||
from youtube_transcript_api.formatters import TextFormatter
|
||||
from youtube_transcript_api.proxies import WebshareProxyConfig
|
||||
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
@@ -12,7 +16,42 @@ from backend.data.block import (
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
)
|
||||
from backend.data.model import SchemaField
|
||||
from backend.data.model import (
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
UserPasswordCredentials,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TEST_CREDENTIALS = UserPasswordCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="webshare_proxy",
|
||||
username=SecretStr("mock-webshare-username"),
|
||||
password=SecretStr("mock-webshare-password"),
|
||||
title="Mock Webshare Proxy credentials",
|
||||
)
|
||||
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
|
||||
WebshareProxyCredentials = UserPasswordCredentials
|
||||
WebshareProxyCredentialsInput = CredentialsMetaInput[
|
||||
Literal[ProviderName.WEBSHARE_PROXY],
|
||||
Literal["user_password"],
|
||||
]
|
||||
|
||||
|
||||
def WebshareProxyCredentialsField() -> WebshareProxyCredentialsInput:
|
||||
return CredentialsField(
|
||||
description="Webshare proxy credentials for fetching YouTube transcripts",
|
||||
)
|
||||
|
||||
|
||||
class TranscribeYoutubeVideoBlock(Block):
|
||||
@@ -22,6 +61,7 @@ class TranscribeYoutubeVideoBlock(Block):
|
||||
description="The URL of the YouTube video to transcribe",
|
||||
placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
|
||||
)
|
||||
credentials: WebshareProxyCredentialsInput = WebshareProxyCredentialsField()
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
video_id: str = SchemaField(description="The extracted YouTube video ID")
|
||||
@@ -35,9 +75,12 @@ class TranscribeYoutubeVideoBlock(Block):
|
||||
id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c",
|
||||
input_schema=TranscribeYoutubeVideoBlock.Input,
|
||||
output_schema=TranscribeYoutubeVideoBlock.Output,
|
||||
description="Transcribes a YouTube video.",
|
||||
description="Transcribes a YouTube video using a proxy.",
|
||||
categories={BlockCategory.SOCIAL},
|
||||
test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"},
|
||||
test_input={
|
||||
"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_output=[
|
||||
("video_id", "dQw4w9WgXcQ"),
|
||||
(
|
||||
@@ -45,8 +88,9 @@ class TranscribeYoutubeVideoBlock(Block):
|
||||
"Never gonna give you up\nNever gonna let you down",
|
||||
),
|
||||
],
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_mock={
|
||||
"get_transcript": lambda video_id: [
|
||||
"get_transcript": lambda video_id, credentials: [
|
||||
{"text": "Never gonna give you up"},
|
||||
{"text": "Never gonna let you down"},
|
||||
],
|
||||
@@ -69,16 +113,27 @@ class TranscribeYoutubeVideoBlock(Block):
|
||||
return parsed_url.path.split("/")[2]
|
||||
raise ValueError(f"Invalid YouTube URL: {url}")
|
||||
|
||||
@staticmethod
|
||||
def get_transcript(video_id: str) -> FetchedTranscript:
|
||||
def get_transcript(
|
||||
self, video_id: str, credentials: WebshareProxyCredentials
|
||||
) -> FetchedTranscript:
|
||||
"""
|
||||
Get transcript for a video, preferring English but falling back to any available language.
|
||||
|
||||
:param video_id: The YouTube video ID
|
||||
:param credentials: The Webshare proxy credentials
|
||||
:return: The fetched transcript
|
||||
:raises: Any exception except NoTranscriptFound for requested languages
|
||||
"""
|
||||
api = YouTubeTranscriptApi()
|
||||
logger.warning(
|
||||
"Using Webshare proxy for YouTube transcript fetch (video_id=%s)",
|
||||
video_id,
|
||||
)
|
||||
proxy_config = WebshareProxyConfig(
|
||||
proxy_username=credentials.username.get_secret_value(),
|
||||
proxy_password=credentials.password.get_secret_value(),
|
||||
)
|
||||
|
||||
api = YouTubeTranscriptApi(proxy_config=proxy_config)
|
||||
try:
|
||||
# Try to get English transcript first (default behavior)
|
||||
return api.fetch(video_id=video_id)
|
||||
@@ -101,11 +156,17 @@ class TranscribeYoutubeVideoBlock(Block):
|
||||
transcript_text = formatter.format_transcript(transcript)
|
||||
return transcript_text
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
async def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: WebshareProxyCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
video_id = self.extract_video_id(input_data.youtube_url)
|
||||
yield "video_id", video_id
|
||||
|
||||
transcript = self.get_transcript(video_id)
|
||||
transcript = self.get_transcript(video_id, credentials)
|
||||
transcript_text = self.format_transcript(transcript=transcript)
|
||||
|
||||
yield "transcript", transcript_text
|
||||
|
||||
@@ -5,6 +5,8 @@ from datetime import datetime
|
||||
from faker import Faker
|
||||
from prisma import Prisma
|
||||
|
||||
from backend.data.db import query_raw_with_schema
|
||||
|
||||
faker = Faker()
|
||||
|
||||
|
||||
@@ -15,9 +17,9 @@ async def check_cron_job(db):
|
||||
|
||||
try:
|
||||
# Check if pg_cron extension exists
|
||||
extension_check = await db.query_raw("CREATE EXTENSION pg_cron;")
|
||||
extension_check = await query_raw_with_schema("CREATE EXTENSION pg_cron;")
|
||||
print(extension_check)
|
||||
extension_check = await db.query_raw(
|
||||
extension_check = await query_raw_with_schema(
|
||||
"SELECT COUNT(*) as count FROM pg_extension WHERE extname = 'pg_cron'"
|
||||
)
|
||||
if extension_check[0]["count"] == 0:
|
||||
@@ -25,7 +27,7 @@ async def check_cron_job(db):
|
||||
return False
|
||||
|
||||
# Check if the refresh job exists
|
||||
job_check = await db.query_raw(
|
||||
job_check = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT jobname, schedule, command
|
||||
FROM cron.job
|
||||
@@ -55,33 +57,33 @@ async def get_materialized_view_counts(db):
|
||||
print("-" * 40)
|
||||
|
||||
# Get counts from mv_agent_run_counts
|
||||
agent_runs = await db.query_raw(
|
||||
agent_runs = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT COUNT(*) as total_agents,
|
||||
SUM(run_count) as total_runs,
|
||||
MAX(run_count) as max_runs,
|
||||
MIN(run_count) as min_runs
|
||||
FROM mv_agent_run_counts
|
||||
FROM {schema_prefix}mv_agent_run_counts
|
||||
"""
|
||||
)
|
||||
|
||||
# Get counts from mv_review_stats
|
||||
review_stats = await db.query_raw(
|
||||
review_stats = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT COUNT(*) as total_listings,
|
||||
SUM(review_count) as total_reviews,
|
||||
AVG(avg_rating) as overall_avg_rating
|
||||
FROM mv_review_stats
|
||||
FROM {schema_prefix}mv_review_stats
|
||||
"""
|
||||
)
|
||||
|
||||
# Get sample data from StoreAgent view
|
||||
store_agents = await db.query_raw(
|
||||
store_agents = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT COUNT(*) as total_store_agents,
|
||||
AVG(runs) as avg_runs,
|
||||
AVG(rating) as avg_rating
|
||||
FROM "StoreAgent"
|
||||
FROM {schema_prefix}"StoreAgent"
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ import asyncio
|
||||
|
||||
from prisma import Prisma
|
||||
|
||||
from backend.data.db import query_raw_with_schema
|
||||
|
||||
|
||||
async def check_store_data(db):
|
||||
"""Check what store data exists in the database."""
|
||||
@@ -89,11 +91,11 @@ async def check_store_data(db):
|
||||
sa.creator_username,
|
||||
sa.categories,
|
||||
sa.updated_at
|
||||
FROM "StoreAgent" sa
|
||||
FROM {schema_prefix}"StoreAgent" sa
|
||||
LIMIT 10;
|
||||
"""
|
||||
|
||||
store_agents = await db.query_raw(query)
|
||||
store_agents = await query_raw_with_schema(query)
|
||||
print(f"Total store agents in view: {len(store_agents)}")
|
||||
|
||||
if store_agents:
|
||||
@@ -111,22 +113,22 @@ async def check_store_data(db):
|
||||
# Check for any APPROVED store listing versions
|
||||
query = """
|
||||
SELECT COUNT(*) as count
|
||||
FROM "StoreListingVersion"
|
||||
FROM {schema_prefix}"StoreListingVersion"
|
||||
WHERE "submissionStatus" = 'APPROVED'
|
||||
"""
|
||||
|
||||
result = await db.query_raw(query)
|
||||
result = await query_raw_with_schema(query)
|
||||
approved_count = result[0]["count"] if result else 0
|
||||
print(f"Approved store listing versions: {approved_count}")
|
||||
|
||||
# Check for store listings with hasApprovedVersion = true
|
||||
query = """
|
||||
SELECT COUNT(*) as count
|
||||
FROM "StoreListing"
|
||||
FROM {schema_prefix}"StoreListing"
|
||||
WHERE "hasApprovedVersion" = true AND "isDeleted" = false
|
||||
"""
|
||||
|
||||
result = await db.query_raw(query)
|
||||
result = await query_raw_with_schema(query)
|
||||
has_approved_count = result[0]["count"] if result else 0
|
||||
print(f"Store listings with approved versions: {has_approved_count}")
|
||||
|
||||
@@ -134,10 +136,10 @@ async def check_store_data(db):
|
||||
query = """
|
||||
SELECT COUNT(DISTINCT "agentGraphId") as unique_agents,
|
||||
COUNT(*) as total_executions
|
||||
FROM "AgentGraphExecution"
|
||||
FROM {schema_prefix}"AgentGraphExecution"
|
||||
"""
|
||||
|
||||
result = await db.query_raw(query)
|
||||
result = await query_raw_with_schema(query)
|
||||
if result:
|
||||
print("\nAgent Graph Executions:")
|
||||
print(f" Unique agents with executions: {result[0]['unique_agents']}")
|
||||
|
||||
@@ -1,12 +1,45 @@
|
||||
import logging
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Optional
|
||||
|
||||
import prisma.types
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.db import query_raw_with_schema
|
||||
from backend.util.json import SafeJson
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AccuracyAlertData(BaseModel):
|
||||
"""Alert data when accuracy drops significantly."""
|
||||
|
||||
graph_id: str
|
||||
user_id: Optional[str]
|
||||
drop_percent: float
|
||||
three_day_avg: float
|
||||
seven_day_avg: float
|
||||
detected_at: datetime
|
||||
|
||||
|
||||
class AccuracyLatestData(BaseModel):
|
||||
"""Latest execution accuracy data point."""
|
||||
|
||||
date: datetime
|
||||
daily_score: Optional[float]
|
||||
three_day_avg: Optional[float]
|
||||
seven_day_avg: Optional[float]
|
||||
fourteen_day_avg: Optional[float]
|
||||
|
||||
|
||||
class AccuracyTrendsResponse(BaseModel):
|
||||
"""Response model for accuracy trends and alerts."""
|
||||
|
||||
latest_data: AccuracyLatestData
|
||||
alert: Optional[AccuracyAlertData]
|
||||
historical_data: Optional[list[AccuracyLatestData]] = None
|
||||
|
||||
|
||||
async def log_raw_analytics(
|
||||
user_id: str,
|
||||
type: str,
|
||||
@@ -43,3 +76,217 @@ async def log_raw_metric(
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
async def get_accuracy_trends_and_alerts(
|
||||
graph_id: str,
|
||||
days_back: int = 30,
|
||||
user_id: Optional[str] = None,
|
||||
drop_threshold: float = 10.0,
|
||||
include_historical: bool = False,
|
||||
) -> AccuracyTrendsResponse:
|
||||
"""Get accuracy trends and detect alerts for a specific graph."""
|
||||
query_template = """
|
||||
WITH daily_scores AS (
|
||||
SELECT
|
||||
DATE(e."createdAt") as execution_date,
|
||||
AVG(CASE
|
||||
WHEN e.stats IS NOT NULL
|
||||
AND e.stats::json->>'correctness_score' IS NOT NULL
|
||||
AND e.stats::json->>'correctness_score' != 'null'
|
||||
THEN (e.stats::json->>'correctness_score')::float * 100
|
||||
ELSE NULL
|
||||
END) as daily_score
|
||||
FROM {schema_prefix}"AgentGraphExecution" e
|
||||
WHERE e."agentGraphId" = $1::text
|
||||
AND e."isDeleted" = false
|
||||
AND e."createdAt" >= $2::timestamp
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
{user_filter}
|
||||
GROUP BY DATE(e."createdAt")
|
||||
HAVING COUNT(*) >= 3 -- Need at least 3 executions per day
|
||||
),
|
||||
trends AS (
|
||||
SELECT
|
||||
execution_date,
|
||||
daily_score,
|
||||
AVG(daily_score) OVER (
|
||||
ORDER BY execution_date
|
||||
ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
|
||||
) as three_day_avg,
|
||||
AVG(daily_score) OVER (
|
||||
ORDER BY execution_date
|
||||
ROWS BETWEEN 6 PRECEDING AND CURRENT ROW
|
||||
) as seven_day_avg,
|
||||
AVG(daily_score) OVER (
|
||||
ORDER BY execution_date
|
||||
ROWS BETWEEN 13 PRECEDING AND CURRENT ROW
|
||||
) as fourteen_day_avg
|
||||
FROM daily_scores
|
||||
)
|
||||
SELECT *,
|
||||
CASE
|
||||
WHEN three_day_avg IS NOT NULL AND seven_day_avg IS NOT NULL AND seven_day_avg > 0
|
||||
THEN ((seven_day_avg - three_day_avg) / seven_day_avg * 100)
|
||||
ELSE NULL
|
||||
END as drop_percent
|
||||
FROM trends
|
||||
ORDER BY execution_date DESC
|
||||
{limit_clause}
|
||||
"""
|
||||
|
||||
start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
params = [graph_id, start_date]
|
||||
user_filter = ""
|
||||
if user_id:
|
||||
user_filter = 'AND e."userId" = $3::text'
|
||||
params.append(user_id)
|
||||
|
||||
# Determine limit clause
|
||||
limit_clause = "" if include_historical else "LIMIT 1"
|
||||
|
||||
final_query = query_template.format(
|
||||
schema_prefix="{schema_prefix}",
|
||||
user_filter=user_filter,
|
||||
limit_clause=limit_clause,
|
||||
)
|
||||
|
||||
result = await query_raw_with_schema(final_query, *params)
|
||||
|
||||
if not result:
|
||||
return AccuracyTrendsResponse(
|
||||
latest_data=AccuracyLatestData(
|
||||
date=datetime.now(timezone.utc),
|
||||
daily_score=None,
|
||||
three_day_avg=None,
|
||||
seven_day_avg=None,
|
||||
fourteen_day_avg=None,
|
||||
),
|
||||
alert=None,
|
||||
)
|
||||
|
||||
latest = result[0]
|
||||
|
||||
alert = None
|
||||
if (
|
||||
latest["drop_percent"] is not None
|
||||
and latest["drop_percent"] >= drop_threshold
|
||||
and latest["three_day_avg"] is not None
|
||||
and latest["seven_day_avg"] is not None
|
||||
):
|
||||
alert = AccuracyAlertData(
|
||||
graph_id=graph_id,
|
||||
user_id=user_id,
|
||||
drop_percent=float(latest["drop_percent"]),
|
||||
three_day_avg=float(latest["three_day_avg"]),
|
||||
seven_day_avg=float(latest["seven_day_avg"]),
|
||||
detected_at=datetime.now(timezone.utc),
|
||||
)
|
||||
|
||||
# Prepare historical data if requested
|
||||
historical_data = None
|
||||
if include_historical:
|
||||
historical_data = []
|
||||
for row in result:
|
||||
historical_data.append(
|
||||
AccuracyLatestData(
|
||||
date=row["execution_date"],
|
||||
daily_score=(
|
||||
float(row["daily_score"])
|
||||
if row["daily_score"] is not None
|
||||
else None
|
||||
),
|
||||
three_day_avg=(
|
||||
float(row["three_day_avg"])
|
||||
if row["three_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
seven_day_avg=(
|
||||
float(row["seven_day_avg"])
|
||||
if row["seven_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
fourteen_day_avg=(
|
||||
float(row["fourteen_day_avg"])
|
||||
if row["fourteen_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
return AccuracyTrendsResponse(
|
||||
latest_data=AccuracyLatestData(
|
||||
date=latest["execution_date"],
|
||||
daily_score=(
|
||||
float(latest["daily_score"])
|
||||
if latest["daily_score"] is not None
|
||||
else None
|
||||
),
|
||||
three_day_avg=(
|
||||
float(latest["three_day_avg"])
|
||||
if latest["three_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
seven_day_avg=(
|
||||
float(latest["seven_day_avg"])
|
||||
if latest["seven_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
fourteen_day_avg=(
|
||||
float(latest["fourteen_day_avg"])
|
||||
if latest["fourteen_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
),
|
||||
alert=alert,
|
||||
historical_data=historical_data,
|
||||
)
|
||||
|
||||
|
||||
class MarketplaceGraphData(BaseModel):
|
||||
"""Data structure for marketplace graph monitoring."""
|
||||
|
||||
graph_id: str
|
||||
user_id: Optional[str]
|
||||
execution_count: int
|
||||
|
||||
|
||||
async def get_marketplace_graphs_for_monitoring(
|
||||
days_back: int = 30,
|
||||
min_executions: int = 10,
|
||||
) -> list[MarketplaceGraphData]:
|
||||
"""Get published marketplace graphs with recent executions for monitoring."""
|
||||
query_template = """
|
||||
WITH marketplace_graphs AS (
|
||||
SELECT DISTINCT
|
||||
slv."agentGraphId" as graph_id,
|
||||
slv."agentGraphVersion" as graph_version
|
||||
FROM {schema_prefix}"StoreListing" sl
|
||||
JOIN {schema_prefix}"StoreListingVersion" slv ON sl."activeVersionId" = slv."id"
|
||||
WHERE sl."hasApprovedVersion" = true
|
||||
AND sl."isDeleted" = false
|
||||
)
|
||||
SELECT DISTINCT
|
||||
mg.graph_id,
|
||||
NULL as user_id, -- Marketplace graphs don't have a specific user_id for monitoring
|
||||
COUNT(*) as execution_count
|
||||
FROM marketplace_graphs mg
|
||||
JOIN {schema_prefix}"AgentGraphExecution" e ON e."agentGraphId" = mg.graph_id
|
||||
WHERE e."createdAt" >= $1::timestamp
|
||||
AND e."isDeleted" = false
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
GROUP BY mg.graph_id
|
||||
HAVING COUNT(*) >= $2
|
||||
ORDER BY execution_count DESC
|
||||
"""
|
||||
start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
result = await query_raw_with_schema(query_template, start_date, min_executions)
|
||||
|
||||
return [
|
||||
MarketplaceGraphData(
|
||||
graph_id=row["graph_id"],
|
||||
user_id=row["user_id"],
|
||||
execution_count=int(row["execution_count"]),
|
||||
)
|
||||
for row in result
|
||||
]
|
||||
|
||||
@@ -29,6 +29,13 @@ from backend.data.model import NodeExecutionStats
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util import json
|
||||
from backend.util.cache import cached
|
||||
from backend.util.exceptions import (
|
||||
BlockError,
|
||||
BlockExecutionError,
|
||||
BlockInputError,
|
||||
BlockOutputError,
|
||||
BlockUnknownError,
|
||||
)
|
||||
from backend.util.settings import Config
|
||||
|
||||
from .model import (
|
||||
@@ -64,6 +71,7 @@ class BlockType(Enum):
|
||||
AGENT = "Agent"
|
||||
AI = "AI"
|
||||
AYRSHARE = "Ayrshare"
|
||||
HUMAN_IN_THE_LOOP = "Human In The Loop"
|
||||
|
||||
|
||||
class BlockCategory(Enum):
|
||||
@@ -258,14 +266,61 @@ class BlockSchema(BaseModel):
|
||||
)
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
|
||||
"""
|
||||
Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).
|
||||
|
||||
Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}
|
||||
|
||||
Raises:
|
||||
ValueError: If multiple fields have the same kwarg_name, as this would
|
||||
cause silent overwriting and only the last field would be processed.
|
||||
"""
|
||||
result: dict[str, dict[str, Any]] = {}
|
||||
schema = cls.jsonschema()
|
||||
properties = schema.get("properties", {})
|
||||
|
||||
for field_name, field_schema in properties.items():
|
||||
auto_creds = field_schema.get("auto_credentials")
|
||||
if auto_creds:
|
||||
kwarg_name = auto_creds.get("kwarg_name", "credentials")
|
||||
if kwarg_name in result:
|
||||
raise ValueError(
|
||||
f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
|
||||
f"in fields '{result[kwarg_name]['field_name']}' and "
|
||||
f"'{field_name}' on {cls.__qualname__}"
|
||||
)
|
||||
result[kwarg_name] = {
|
||||
"field_name": field_name,
|
||||
"config": auto_creds,
|
||||
}
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
|
||||
return {
|
||||
field_name: CredentialsFieldInfo.model_validate(
|
||||
result = {}
|
||||
|
||||
# Regular credentials fields
|
||||
for field_name in cls.get_credentials_fields().keys():
|
||||
result[field_name] = CredentialsFieldInfo.model_validate(
|
||||
cls.get_field_schema(field_name), by_alias=True
|
||||
)
|
||||
for field_name in cls.get_credentials_fields().keys()
|
||||
}
|
||||
|
||||
# Auto-generated credentials fields (from GoogleDriveFileInput etc.)
|
||||
for kwarg_name, info in cls.get_auto_credentials_fields().items():
|
||||
config = info["config"]
|
||||
# Build a schema-like dict that CredentialsFieldInfo can parse
|
||||
auto_schema = {
|
||||
"credentials_provider": [config.get("provider", "google")],
|
||||
"credentials_types": [config.get("type", "oauth2")],
|
||||
"credentials_scopes": config.get("scopes"),
|
||||
}
|
||||
result[kwarg_name] = CredentialsFieldInfo.model_validate(
|
||||
auto_schema, by_alias=True
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
|
||||
@@ -542,9 +597,29 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
||||
)
|
||||
|
||||
async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
|
||||
try:
|
||||
async for output_name, output_data in self._execute(input_data, **kwargs):
|
||||
yield output_name, output_data
|
||||
except Exception as ex:
|
||||
if isinstance(ex, BlockError):
|
||||
raise ex
|
||||
else:
|
||||
raise (
|
||||
BlockExecutionError
|
||||
if isinstance(ex, ValueError)
|
||||
else BlockUnknownError
|
||||
)(
|
||||
message=str(ex),
|
||||
block_name=self.name,
|
||||
block_id=self.id,
|
||||
) from ex
|
||||
|
||||
async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
|
||||
if error := self.input_schema.validate_data(input_data):
|
||||
raise ValueError(
|
||||
f"Unable to execute block with invalid input data: {error}"
|
||||
raise BlockInputError(
|
||||
message=f"Unable to execute block with invalid input data: {error}",
|
||||
block_name=self.name,
|
||||
block_id=self.id,
|
||||
)
|
||||
|
||||
async for output_name, output_data in self.run(
|
||||
@@ -552,11 +627,17 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
||||
**kwargs,
|
||||
):
|
||||
if output_name == "error":
|
||||
raise RuntimeError(output_data)
|
||||
raise BlockExecutionError(
|
||||
message=output_data, block_name=self.name, block_id=self.id
|
||||
)
|
||||
if self.block_type == BlockType.STANDARD and (
|
||||
error := self.output_schema.validate_field(output_name, output_data)
|
||||
):
|
||||
raise ValueError(f"Block produced an invalid output data: {error}")
|
||||
raise BlockOutputError(
|
||||
message=f"Block produced an invalid output data: {error}",
|
||||
block_name=self.name,
|
||||
block_id=self.id,
|
||||
)
|
||||
yield output_name, output_data
|
||||
|
||||
def is_triggered_by_event_type(
|
||||
@@ -767,3 +848,12 @@ def get_io_block_ids() -> Sequence[str]:
|
||||
for id, B in get_blocks().items()
|
||||
if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
|
||||
]
|
||||
|
||||
|
||||
@cached(ttl_seconds=3600)
|
||||
def get_human_in_the_loop_block_ids() -> Sequence[str]:
|
||||
return [
|
||||
id
|
||||
for id, B in get_blocks().items()
|
||||
if B().block_type == BlockType.HUMAN_IN_THE_LOOP
|
||||
]
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
from typing import Type
|
||||
|
||||
from backend.blocks.ai_image_customizer import AIImageCustomizerBlock, GeminiImageModel
|
||||
from backend.blocks.ai_image_generator_block import AIImageGeneratorBlock, ImageGenModel
|
||||
from backend.blocks.ai_music_generator import AIMusicGeneratorBlock
|
||||
from backend.blocks.ai_shortform_video_block import (
|
||||
AIAdMakerVideoCreatorBlock,
|
||||
@@ -9,6 +11,7 @@ from backend.blocks.ai_shortform_video_block import (
|
||||
from backend.blocks.apollo.organization import SearchOrganizationsBlock
|
||||
from backend.blocks.apollo.people import SearchPeopleBlock
|
||||
from backend.blocks.apollo.person import GetPersonDetailBlock
|
||||
from backend.blocks.codex import CodeGenerationBlock, CodexModel
|
||||
from backend.blocks.enrichlayer.linkedin import (
|
||||
GetLinkedinProfileBlock,
|
||||
GetLinkedinProfilePictureBlock,
|
||||
@@ -61,9 +64,10 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.O1_MINI: 4,
|
||||
# GPT-5 models
|
||||
LlmModel.GPT5: 2,
|
||||
LlmModel.GPT5_1: 5,
|
||||
LlmModel.GPT5_MINI: 1,
|
||||
LlmModel.GPT5_NANO: 1,
|
||||
LlmModel.GPT5_CHAT: 2,
|
||||
LlmModel.GPT5_CHAT: 5,
|
||||
LlmModel.GPT41: 2,
|
||||
LlmModel.GPT41_MINI: 1,
|
||||
LlmModel.GPT4O_MINI: 1,
|
||||
@@ -74,6 +78,7 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.CLAUDE_4_OPUS: 21,
|
||||
LlmModel.CLAUDE_4_SONNET: 5,
|
||||
LlmModel.CLAUDE_4_5_HAIKU: 4,
|
||||
LlmModel.CLAUDE_4_5_OPUS: 14,
|
||||
LlmModel.CLAUDE_4_5_SONNET: 9,
|
||||
LlmModel.CLAUDE_3_7_SONNET: 5,
|
||||
LlmModel.CLAUDE_3_HAIKU: 1,
|
||||
@@ -92,6 +97,7 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.OPENAI_GPT_OSS_120B: 1,
|
||||
LlmModel.OPENAI_GPT_OSS_20B: 1,
|
||||
LlmModel.GEMINI_2_5_PRO: 4,
|
||||
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
|
||||
LlmModel.MISTRAL_NEMO: 1,
|
||||
LlmModel.COHERE_COMMAND_R_08_2024: 1,
|
||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
|
||||
@@ -113,6 +119,9 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.LLAMA_API_LLAMA3_3_8B: 1,
|
||||
LlmModel.LLAMA_API_LLAMA3_3_70B: 1,
|
||||
LlmModel.GROK_4: 9,
|
||||
LlmModel.GROK_4_FAST: 1,
|
||||
LlmModel.GROK_4_1_FAST: 1,
|
||||
LlmModel.GROK_CODE_FAST_1: 1,
|
||||
LlmModel.KIMI_K2: 1,
|
||||
LlmModel.QWEN3_235B_A22B_THINKING: 1,
|
||||
LlmModel.QWEN3_CODER: 9,
|
||||
@@ -258,6 +267,20 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
|
||||
AIStructuredResponseGeneratorBlock: LLM_COST,
|
||||
AITextSummarizerBlock: LLM_COST,
|
||||
AIListGeneratorBlock: LLM_COST,
|
||||
CodeGenerationBlock: [
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": CodexModel.GPT5_1_CODEX,
|
||||
"credentials": {
|
||||
"id": openai_credentials.id,
|
||||
"provider": openai_credentials.provider,
|
||||
"type": openai_credentials.type,
|
||||
},
|
||||
},
|
||||
cost_amount=5,
|
||||
)
|
||||
],
|
||||
CreateTalkingAvatarVideoBlock: [
|
||||
BlockCost(
|
||||
cost_amount=15,
|
||||
@@ -535,4 +558,85 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
|
||||
},
|
||||
)
|
||||
],
|
||||
AIImageGeneratorBlock: [
|
||||
BlockCost(
|
||||
cost_amount=5, # SD3.5 Medium: ~$0.035 per image
|
||||
cost_filter={
|
||||
"model": ImageGenModel.SD3_5,
|
||||
"credentials": {
|
||||
"id": replicate_credentials.id,
|
||||
"provider": replicate_credentials.provider,
|
||||
"type": replicate_credentials.type,
|
||||
},
|
||||
},
|
||||
),
|
||||
BlockCost(
|
||||
cost_amount=6, # Flux 1.1 Pro: ~$0.04 per image
|
||||
cost_filter={
|
||||
"model": ImageGenModel.FLUX,
|
||||
"credentials": {
|
||||
"id": replicate_credentials.id,
|
||||
"provider": replicate_credentials.provider,
|
||||
"type": replicate_credentials.type,
|
||||
},
|
||||
},
|
||||
),
|
||||
BlockCost(
|
||||
cost_amount=10, # Flux 1.1 Pro Ultra: ~$0.08 per image
|
||||
cost_filter={
|
||||
"model": ImageGenModel.FLUX_ULTRA,
|
||||
"credentials": {
|
||||
"id": replicate_credentials.id,
|
||||
"provider": replicate_credentials.provider,
|
||||
"type": replicate_credentials.type,
|
||||
},
|
||||
},
|
||||
),
|
||||
BlockCost(
|
||||
cost_amount=7, # Recraft v3: ~$0.05 per image
|
||||
cost_filter={
|
||||
"model": ImageGenModel.RECRAFT,
|
||||
"credentials": {
|
||||
"id": replicate_credentials.id,
|
||||
"provider": replicate_credentials.provider,
|
||||
"type": replicate_credentials.type,
|
||||
},
|
||||
},
|
||||
),
|
||||
BlockCost(
|
||||
cost_amount=14, # Nano Banana Pro: $0.14 per image at 2K
|
||||
cost_filter={
|
||||
"model": ImageGenModel.NANO_BANANA_PRO,
|
||||
"credentials": {
|
||||
"id": replicate_credentials.id,
|
||||
"provider": replicate_credentials.provider,
|
||||
"type": replicate_credentials.type,
|
||||
},
|
||||
},
|
||||
),
|
||||
],
|
||||
AIImageCustomizerBlock: [
|
||||
BlockCost(
|
||||
cost_amount=10, # Nano Banana (original)
|
||||
cost_filter={
|
||||
"model": GeminiImageModel.NANO_BANANA,
|
||||
"credentials": {
|
||||
"id": replicate_credentials.id,
|
||||
"provider": replicate_credentials.provider,
|
||||
"type": replicate_credentials.type,
|
||||
},
|
||||
},
|
||||
),
|
||||
BlockCost(
|
||||
cost_amount=14, # Nano Banana Pro: $0.14 per image at 2K
|
||||
cost_filter={
|
||||
"model": GeminiImageModel.NANO_BANANA_PRO,
|
||||
"credentials": {
|
||||
"id": replicate_credentials.id,
|
||||
"provider": replicate_credentials.provider,
|
||||
"type": replicate_credentials.type,
|
||||
},
|
||||
},
|
||||
),
|
||||
],
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ from prisma.models import CreditTransaction, UserBalance
|
||||
from backend.blocks.llm import AITextGeneratorBlock
|
||||
from backend.data.block import get_block
|
||||
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
|
||||
from backend.data.execution import NodeExecutionEntry, UserContext
|
||||
from backend.data.execution import ExecutionContext, NodeExecutionEntry
|
||||
from backend.data.user import DEFAULT_USER_ID
|
||||
from backend.executor.utils import block_usage_cost
|
||||
from backend.integrations.credentials_store import openai_credentials
|
||||
@@ -73,6 +73,7 @@ async def test_block_credit_usage(server: SpinTestServer):
|
||||
NodeExecutionEntry(
|
||||
user_id=DEFAULT_USER_ID,
|
||||
graph_id="test_graph",
|
||||
graph_version=1,
|
||||
node_id="test_node",
|
||||
graph_exec_id="test_graph_exec",
|
||||
node_exec_id="test_node_exec",
|
||||
@@ -85,7 +86,7 @@ async def test_block_credit_usage(server: SpinTestServer):
|
||||
"type": openai_credentials.type,
|
||||
},
|
||||
},
|
||||
user_context=UserContext(timezone="UTC"),
|
||||
execution_context=ExecutionContext(user_timezone="UTC"),
|
||||
),
|
||||
)
|
||||
assert spending_amount_1 > 0
|
||||
@@ -94,12 +95,13 @@ async def test_block_credit_usage(server: SpinTestServer):
|
||||
NodeExecutionEntry(
|
||||
user_id=DEFAULT_USER_ID,
|
||||
graph_id="test_graph",
|
||||
graph_version=1,
|
||||
node_id="test_node",
|
||||
graph_exec_id="test_graph_exec",
|
||||
node_exec_id="test_node_exec",
|
||||
block_id=AITextGeneratorBlock().id,
|
||||
inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
|
||||
user_context=UserContext(timezone="UTC"),
|
||||
execution_context=ExecutionContext(user_timezone="UTC"),
|
||||
),
|
||||
)
|
||||
assert spending_amount_2 == 0
|
||||
|
||||
@@ -92,6 +92,18 @@ def get_dynamic_field_description(field_name: str) -> str:
|
||||
return f"Value for {field_name}"
|
||||
|
||||
|
||||
def is_tool_pin(name: str) -> bool:
|
||||
"""Check if a pin name represents a tool connection."""
|
||||
return name.startswith("tools_^_") or name == "tools"
|
||||
|
||||
|
||||
def sanitize_pin_name(name: str) -> str:
|
||||
sanitized_name = extract_base_field_name(name)
|
||||
if is_tool_pin(sanitized_name):
|
||||
return "tools"
|
||||
return sanitized_name
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Dynamic field parsing and merging utilities
|
||||
# --------------------------------------------------------------------------- #
|
||||
@@ -137,30 +149,64 @@ def _tokenise(path: str) -> list[tuple[str, str]] | None:
|
||||
return tokens
|
||||
|
||||
|
||||
def parse_execution_output(output: tuple[str, Any], name: str) -> Any:
|
||||
def parse_execution_output(
|
||||
output_item: tuple[str, Any],
|
||||
link_output_selector: str,
|
||||
sink_node_id: str | None = None,
|
||||
sink_pin_name: str | None = None,
|
||||
) -> Any:
|
||||
"""
|
||||
Retrieve a nested value out of `output` using the flattened *name*.
|
||||
Retrieve a nested value out of `output` using the flattened `link_output_selector`.
|
||||
|
||||
On any failure (wrong name, wrong type, out-of-range, bad path)
|
||||
returns **None**.
|
||||
On any failure (wrong name, wrong type, out-of-range, bad path) returns **None**.
|
||||
|
||||
### Special Case: Tool pins
|
||||
For regular output pins, the `output_item`'s name will simply be the field name, and
|
||||
`link_output_selector` (= the `source_name` of the link) may provide a "selector"
|
||||
used to extract part of the output value and route it through the link
|
||||
to the next node.
|
||||
|
||||
However, for tool pins, it is the other way around: the `output_item`'s name
|
||||
provides the routing information (`tools_^_{sink_node_id}_~_{field_name}`),
|
||||
and the `link_output_selector` is simply `"tools"`
|
||||
(or `"tools_^_{tool_name}_~_{field_name}"` for backward compatibility).
|
||||
|
||||
Args:
|
||||
output: Tuple of (base_name, data) representing a block output entry
|
||||
name: The flattened field name to extract from the output data
|
||||
output_item: Tuple of (base_name, data) representing a block output entry.
|
||||
link_output_selector: The flattened field name to extract from the output data.
|
||||
sink_node_id: Sink node ID, used for tool use routing.
|
||||
sink_pin_name: Sink pin name, used for tool use routing.
|
||||
|
||||
Returns:
|
||||
The value at the specified path, or None if not found/invalid
|
||||
The value at the specified path, or `None` if not found/invalid.
|
||||
"""
|
||||
base_name, data = output
|
||||
output_pin_name, data = output_item
|
||||
|
||||
# Special handling for tool pins
|
||||
if is_tool_pin(link_output_selector) and ( # "tools" or "tools_^_…"
|
||||
output_pin_name.startswith("tools_^_") and "_~_" in output_pin_name
|
||||
):
|
||||
if not (sink_node_id and sink_pin_name):
|
||||
raise ValueError(
|
||||
"sink_node_id and sink_pin_name must be provided for tool pin routing"
|
||||
)
|
||||
|
||||
# Extract routing information from emit key: tools_^_{node_id}_~_{field}
|
||||
selector = output_pin_name[8:] # Remove "tools_^_" prefix
|
||||
target_node_id, target_input_pin = selector.split("_~_", 1)
|
||||
if target_node_id == sink_node_id and target_input_pin == sink_pin_name:
|
||||
return data
|
||||
else:
|
||||
return None
|
||||
|
||||
# Exact match → whole object
|
||||
if name == base_name:
|
||||
if link_output_selector == output_pin_name:
|
||||
return data
|
||||
|
||||
# Must start with the expected name
|
||||
if not name.startswith(base_name):
|
||||
if not link_output_selector.startswith(output_pin_name):
|
||||
return None
|
||||
path = name[len(base_name) :]
|
||||
path = link_output_selector[len(output_pin_name) :]
|
||||
if not path:
|
||||
return None # nothing left to parse
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ from enum import Enum
|
||||
from multiprocessing import Manager
|
||||
from queue import Empty
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
AsyncGenerator,
|
||||
@@ -34,6 +35,7 @@ from prisma.types import (
|
||||
AgentNodeExecutionKeyValueDataCreateInput,
|
||||
AgentNodeExecutionUpdateInput,
|
||||
AgentNodeExecutionWhereInput,
|
||||
AgentNodeExecutionWhereUniqueInput,
|
||||
)
|
||||
from pydantic import BaseModel, ConfigDict, JsonValue, ValidationError
|
||||
from pydantic.fields import Field
|
||||
@@ -64,12 +66,27 @@ from .includes import (
|
||||
)
|
||||
from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats
|
||||
|
||||
if TYPE_CHECKING:
|
||||
pass
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
config = Config()
|
||||
|
||||
|
||||
class ExecutionContext(BaseModel):
|
||||
"""
|
||||
Unified context that carries execution-level data throughout the entire execution flow.
|
||||
This includes information needed by blocks, sub-graphs, and execution management.
|
||||
"""
|
||||
|
||||
safe_mode: bool = True
|
||||
user_timezone: str = "UTC"
|
||||
root_execution_id: Optional[str] = None
|
||||
parent_execution_id: Optional[str] = None
|
||||
|
||||
|
||||
# -------------------------- Models -------------------------- #
|
||||
|
||||
|
||||
@@ -96,11 +113,14 @@ NodesInputMasks = Mapping[str, NodeInputMask]
|
||||
VALID_STATUS_TRANSITIONS = {
|
||||
ExecutionStatus.QUEUED: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.TERMINATED, # For resuming halted execution
|
||||
ExecutionStatus.REVIEW, # For resuming after review
|
||||
],
|
||||
ExecutionStatus.RUNNING: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.TERMINATED, # For resuming halted execution
|
||||
ExecutionStatus.REVIEW, # For resuming after review
|
||||
],
|
||||
ExecutionStatus.COMPLETED: [
|
||||
ExecutionStatus.RUNNING,
|
||||
@@ -109,11 +129,16 @@ VALID_STATUS_TRANSITIONS = {
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.REVIEW,
|
||||
],
|
||||
ExecutionStatus.TERMINATED: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.REVIEW,
|
||||
],
|
||||
ExecutionStatus.REVIEW: [
|
||||
ExecutionStatus.RUNNING,
|
||||
],
|
||||
}
|
||||
|
||||
@@ -356,9 +381,8 @@ class GraphExecutionWithNodes(GraphExecution):
|
||||
|
||||
def to_graph_execution_entry(
|
||||
self,
|
||||
user_context: "UserContext",
|
||||
execution_context: ExecutionContext,
|
||||
compiled_nodes_input_masks: Optional[NodesInputMasks] = None,
|
||||
parent_graph_exec_id: Optional[str] = None,
|
||||
):
|
||||
return GraphExecutionEntry(
|
||||
user_id=self.user_id,
|
||||
@@ -366,8 +390,7 @@ class GraphExecutionWithNodes(GraphExecution):
|
||||
graph_version=self.graph_version or 0,
|
||||
graph_exec_id=self.id,
|
||||
nodes_input_masks=compiled_nodes_input_masks,
|
||||
user_context=user_context,
|
||||
parent_graph_exec_id=parent_graph_exec_id,
|
||||
execution_context=execution_context,
|
||||
)
|
||||
|
||||
|
||||
@@ -440,17 +463,18 @@ class NodeExecutionResult(BaseModel):
|
||||
)
|
||||
|
||||
def to_node_execution_entry(
|
||||
self, user_context: "UserContext"
|
||||
self, execution_context: ExecutionContext
|
||||
) -> "NodeExecutionEntry":
|
||||
return NodeExecutionEntry(
|
||||
user_id=self.user_id,
|
||||
graph_exec_id=self.graph_exec_id,
|
||||
graph_id=self.graph_id,
|
||||
graph_version=self.graph_version,
|
||||
node_exec_id=self.node_exec_id,
|
||||
node_id=self.node_id,
|
||||
block_id=self.block_id,
|
||||
inputs=self.input_data,
|
||||
user_context=user_context,
|
||||
execution_context=execution_context,
|
||||
)
|
||||
|
||||
|
||||
@@ -460,6 +484,7 @@ class NodeExecutionResult(BaseModel):
|
||||
async def get_graph_executions(
|
||||
graph_exec_id: Optional[str] = None,
|
||||
graph_id: Optional[str] = None,
|
||||
graph_version: Optional[int] = None,
|
||||
user_id: Optional[str] = None,
|
||||
statuses: Optional[list[ExecutionStatus]] = None,
|
||||
created_time_gte: Optional[datetime] = None,
|
||||
@@ -476,6 +501,8 @@ async def get_graph_executions(
|
||||
where_filter["userId"] = user_id
|
||||
if graph_id:
|
||||
where_filter["agentGraphId"] = graph_id
|
||||
if graph_version is not None:
|
||||
where_filter["agentGraphVersion"] = graph_version
|
||||
if created_time_gte or created_time_lte:
|
||||
where_filter["createdAt"] = {
|
||||
"gte": created_time_gte or datetime.min.replace(tzinfo=timezone.utc),
|
||||
@@ -725,7 +752,7 @@ async def upsert_execution_input(
|
||||
input_name: str,
|
||||
input_data: JsonValue,
|
||||
node_exec_id: str | None = None,
|
||||
) -> tuple[str, BlockInput]:
|
||||
) -> tuple[NodeExecutionResult, BlockInput]:
|
||||
"""
|
||||
Insert AgentNodeExecutionInputOutput record for as one of AgentNodeExecution.Input.
|
||||
If there is no AgentNodeExecution that has no `input_name` as input, create new one.
|
||||
@@ -758,7 +785,7 @@ async def upsert_execution_input(
|
||||
existing_execution = await AgentNodeExecution.prisma().find_first(
|
||||
where=existing_exec_query_filter,
|
||||
order={"addedTime": "asc"},
|
||||
include={"Input": True},
|
||||
include={"Input": True, "GraphExecution": True},
|
||||
)
|
||||
json_input_data = SafeJson(input_data)
|
||||
|
||||
@@ -770,7 +797,7 @@ async def upsert_execution_input(
|
||||
referencedByInputExecId=existing_execution.id,
|
||||
)
|
||||
)
|
||||
return existing_execution.id, {
|
||||
return NodeExecutionResult.from_db(existing_execution), {
|
||||
**{
|
||||
input_data.name: type_utils.convert(input_data.data, JsonValue)
|
||||
for input_data in existing_execution.Input or []
|
||||
@@ -785,9 +812,10 @@ async def upsert_execution_input(
|
||||
agentGraphExecutionId=graph_exec_id,
|
||||
executionStatus=ExecutionStatus.INCOMPLETE,
|
||||
Input={"create": {"name": input_name, "data": json_input_data}},
|
||||
)
|
||||
),
|
||||
include={"GraphExecution": True},
|
||||
)
|
||||
return result.id, {input_name: input_data}
|
||||
return NodeExecutionResult.from_db(result), {input_name: input_data}
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
@@ -812,6 +840,30 @@ async def upsert_execution_output(
|
||||
await AgentNodeExecutionInputOutput.prisma().create(data=data)
|
||||
|
||||
|
||||
async def get_execution_outputs_by_node_exec_id(
|
||||
node_exec_id: str,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Get all execution outputs for a specific node execution ID.
|
||||
|
||||
Args:
|
||||
node_exec_id: The node execution ID to get outputs for
|
||||
|
||||
Returns:
|
||||
Dictionary mapping output names to their data values
|
||||
"""
|
||||
outputs = await AgentNodeExecutionInputOutput.prisma().find_many(
|
||||
where={"referencedByOutputExecId": node_exec_id}
|
||||
)
|
||||
|
||||
result = {}
|
||||
for output in outputs:
|
||||
if output.data is not None:
|
||||
result[output.name] = type_utils.convert(output.data, JsonValue)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
async def update_graph_execution_start_time(
|
||||
graph_exec_id: str,
|
||||
) -> GraphExecution | None:
|
||||
@@ -883,9 +935,25 @@ async def update_node_execution_status_batch(
|
||||
node_exec_ids: list[str],
|
||||
status: ExecutionStatus,
|
||||
stats: dict[str, Any] | None = None,
|
||||
):
|
||||
await AgentNodeExecution.prisma().update_many(
|
||||
where={"id": {"in": node_exec_ids}},
|
||||
) -> int:
|
||||
# Validate status transitions - allowed_from should never be empty for valid statuses
|
||||
allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
|
||||
if not allowed_from:
|
||||
raise ValueError(
|
||||
f"Invalid status transition: {status} has no valid source statuses"
|
||||
)
|
||||
|
||||
# For batch updates, we filter to only update nodes with valid current statuses
|
||||
where_clause = cast(
|
||||
AgentNodeExecutionWhereInput,
|
||||
{
|
||||
"id": {"in": node_exec_ids},
|
||||
"executionStatus": {"in": [s.value for s in allowed_from]},
|
||||
},
|
||||
)
|
||||
|
||||
return await AgentNodeExecution.prisma().update_many(
|
||||
where=where_clause,
|
||||
data=_get_update_status_data(status, None, stats),
|
||||
)
|
||||
|
||||
@@ -899,15 +967,32 @@ async def update_node_execution_status(
|
||||
if status == ExecutionStatus.QUEUED and execution_data is None:
|
||||
raise ValueError("Execution data must be provided when queuing an execution.")
|
||||
|
||||
res = await AgentNodeExecution.prisma().update(
|
||||
where={"id": node_exec_id},
|
||||
# Validate status transitions - allowed_from should never be empty for valid statuses
|
||||
allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
|
||||
if not allowed_from:
|
||||
raise ValueError(
|
||||
f"Invalid status transition: {status} has no valid source statuses"
|
||||
)
|
||||
|
||||
if res := await AgentNodeExecution.prisma().update(
|
||||
where=cast(
|
||||
AgentNodeExecutionWhereUniqueInput,
|
||||
{
|
||||
"id": node_exec_id,
|
||||
"executionStatus": {"in": [s.value for s in allowed_from]},
|
||||
},
|
||||
),
|
||||
data=_get_update_status_data(status, execution_data, stats),
|
||||
include=EXECUTION_RESULT_INCLUDE,
|
||||
)
|
||||
if not res:
|
||||
raise ValueError(f"Execution {node_exec_id} not found.")
|
||||
):
|
||||
return NodeExecutionResult.from_db(res)
|
||||
|
||||
return NodeExecutionResult.from_db(res)
|
||||
if res := await AgentNodeExecution.prisma().find_unique(
|
||||
where={"id": node_exec_id}, include=EXECUTION_RESULT_INCLUDE
|
||||
):
|
||||
return NodeExecutionResult.from_db(res)
|
||||
|
||||
raise ValueError(f"Execution {node_exec_id} not found.")
|
||||
|
||||
|
||||
def _get_update_status_data(
|
||||
@@ -961,17 +1046,17 @@ async def get_node_execution(node_exec_id: str) -> NodeExecutionResult | None:
|
||||
return NodeExecutionResult.from_db(execution)
|
||||
|
||||
|
||||
async def get_node_executions(
|
||||
def _build_node_execution_where_clause(
|
||||
graph_exec_id: str | None = None,
|
||||
node_id: str | None = None,
|
||||
block_ids: list[str] | None = None,
|
||||
statuses: list[ExecutionStatus] | None = None,
|
||||
limit: int | None = None,
|
||||
created_time_gte: datetime | None = None,
|
||||
created_time_lte: datetime | None = None,
|
||||
include_exec_data: bool = True,
|
||||
) -> list[NodeExecutionResult]:
|
||||
"""⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
|
||||
) -> AgentNodeExecutionWhereInput:
|
||||
"""
|
||||
Build where clause for node execution queries.
|
||||
"""
|
||||
where_clause: AgentNodeExecutionWhereInput = {}
|
||||
if graph_exec_id:
|
||||
where_clause["agentGraphExecutionId"] = graph_exec_id
|
||||
@@ -988,6 +1073,29 @@ async def get_node_executions(
|
||||
"lte": created_time_lte or datetime.max.replace(tzinfo=timezone.utc),
|
||||
}
|
||||
|
||||
return where_clause
|
||||
|
||||
|
||||
async def get_node_executions(
|
||||
graph_exec_id: str | None = None,
|
||||
node_id: str | None = None,
|
||||
block_ids: list[str] | None = None,
|
||||
statuses: list[ExecutionStatus] | None = None,
|
||||
limit: int | None = None,
|
||||
created_time_gte: datetime | None = None,
|
||||
created_time_lte: datetime | None = None,
|
||||
include_exec_data: bool = True,
|
||||
) -> list[NodeExecutionResult]:
|
||||
"""⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
|
||||
where_clause = _build_node_execution_where_clause(
|
||||
graph_exec_id=graph_exec_id,
|
||||
node_id=node_id,
|
||||
block_ids=block_ids,
|
||||
statuses=statuses,
|
||||
created_time_gte=created_time_gte,
|
||||
created_time_lte=created_time_lte,
|
||||
)
|
||||
|
||||
executions = await AgentNodeExecution.prisma().find_many(
|
||||
where=where_clause,
|
||||
include=(
|
||||
@@ -1029,31 +1137,29 @@ async def get_latest_node_execution(
|
||||
# ----------------- Execution Infrastructure ----------------- #
|
||||
|
||||
|
||||
class UserContext(BaseModel):
|
||||
"""Generic user context for graph execution containing user-specific settings."""
|
||||
|
||||
timezone: str
|
||||
|
||||
|
||||
class GraphExecutionEntry(BaseModel):
|
||||
model_config = {"extra": "ignore"}
|
||||
|
||||
user_id: str
|
||||
graph_exec_id: str
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
nodes_input_masks: Optional[NodesInputMasks] = None
|
||||
user_context: UserContext
|
||||
parent_graph_exec_id: Optional[str] = None
|
||||
execution_context: ExecutionContext = Field(default_factory=ExecutionContext)
|
||||
|
||||
|
||||
class NodeExecutionEntry(BaseModel):
|
||||
model_config = {"extra": "ignore"}
|
||||
|
||||
user_id: str
|
||||
graph_exec_id: str
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
node_exec_id: str
|
||||
node_id: str
|
||||
block_id: str
|
||||
inputs: BlockInput
|
||||
user_context: UserContext
|
||||
execution_context: ExecutionContext = Field(default_factory=ExecutionContext)
|
||||
|
||||
|
||||
class ExecutionQueue(Generic[T]):
|
||||
@@ -1387,3 +1493,35 @@ async def get_graph_execution_by_share_token(
|
||||
created_at=execution.createdAt,
|
||||
outputs=outputs,
|
||||
)
|
||||
|
||||
|
||||
async def get_frequently_executed_graphs(
|
||||
days_back: int = 30,
|
||||
min_executions: int = 10,
|
||||
) -> list[dict]:
|
||||
"""Get graphs that have been frequently executed for monitoring."""
|
||||
query_template = """
|
||||
SELECT DISTINCT
|
||||
e."agentGraphId" as graph_id,
|
||||
e."userId" as user_id,
|
||||
COUNT(*) as execution_count
|
||||
FROM {schema_prefix}"AgentGraphExecution" e
|
||||
WHERE e."createdAt" >= $1::timestamp
|
||||
AND e."isDeleted" = false
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
GROUP BY e."agentGraphId", e."userId"
|
||||
HAVING COUNT(*) >= $2
|
||||
ORDER BY execution_count DESC
|
||||
"""
|
||||
|
||||
start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
result = await query_raw_with_schema(query_template, start_date, min_executions)
|
||||
|
||||
return [
|
||||
{
|
||||
"graph_id": row["graph_id"],
|
||||
"user_id": row["user_id"],
|
||||
"execution_count": int(row["execution_count"]),
|
||||
}
|
||||
for row in result
|
||||
]
|
||||
|
||||
@@ -18,6 +18,7 @@ from prisma.types import (
    AgentGraphWhereInput,
    AgentNodeCreateInput,
    AgentNodeLinkCreateInput,
    StoreListingVersionWhereInput,
)
from pydantic import BaseModel, Field, create_model
from pydantic.fields import computed_field
@@ -26,7 +27,7 @@ from backend.blocks.agent import AgentExecutorBlock
from backend.blocks.io import AgentInputBlock, AgentOutputBlock
from backend.blocks.llm import LlmModel
from backend.data.db import prisma as db
from backend.data.dynamic_fields import extract_base_field_name
from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
from backend.data.model import (
    CredentialsField,
@@ -60,6 +61,10 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


class GraphSettings(BaseModel):
    human_in_the_loop_safe_mode: bool | None = None


class Link(BaseDbModel):
    source_id: str
    sink_id: str
@@ -224,6 +229,15 @@ class BaseGraph(BaseDbModel):
    def has_external_trigger(self) -> bool:
        return self.webhook_input_node is not None

    @computed_field
    @property
    def has_human_in_the_loop(self) -> bool:
        return any(
            node.block_id
            for node in self.nodes
            if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
        )

    @property
    def webhook_input_node(self) -> Node | None:
        return next(
@@ -578,9 +592,9 @@ class GraphModel(Graph):
            nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
        )
        provided_inputs = set(
            [_sanitize_pin_name(name) for name in node.input_default]
            [sanitize_pin_name(name) for name in node.input_default]
            + [
                _sanitize_pin_name(link.sink_name)
                sanitize_pin_name(link.sink_name)
                for link in input_links.get(node.id, [])
            ]
            + ([name for name in node_input_mask] if node_input_mask else [])
@@ -696,7 +710,7 @@ class GraphModel(Graph):
                f"{prefix}, {node.block_id} is invalid block id, available blocks: {blocks}"
            )

        sanitized_name = _sanitize_pin_name(name)
        sanitized_name = sanitize_pin_name(name)
        vals = node.input_default
        if i == 0:
            fields = (
@@ -710,7 +724,7 @@ class GraphModel(Graph):
                if block.block_type not in [BlockType.AGENT]
                else vals.get("input_schema", {}).get("properties", {}).keys()
            )
            if sanitized_name not in fields and not _is_tool_pin(name):
            if sanitized_name not in fields and not is_tool_pin(name):
                fields_msg = f"Allowed fields: {fields}"
                raise ValueError(f"{prefix}, `{name}` invalid, {fields_msg}")

@@ -750,17 +764,6 @@ class GraphModel(Graph):
    )


def _is_tool_pin(name: str) -> bool:
    return name.startswith("tools_^_")


def _sanitize_pin_name(name: str) -> str:
    sanitized_name = extract_base_field_name(name)
    if _is_tool_pin(sanitized_name):
        return "tools"
    return sanitized_name

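These private helpers are deleted here in favor of the shared is_tool_pin / sanitize_pin_name imported from backend.data.dynamic_fields above. A hedged sketch of the behavior they encode, assuming the shared versions mirror the removed ones (the pin name is made up):

# Hypothetical behavior check, for illustration only:
assert is_tool_pin("tools_^_web_search") is True         # tool pins share the "tools_^_" prefix
assert sanitize_pin_name("tools_^_web_search") == "tools"  # every tool pin collapses to "tools"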
class GraphMeta(Graph):
    user_id: str

@@ -895,9 +898,9 @@ async def get_graph_metadata(graph_id: str, version: int | None = None) -> Graph

async def get_graph(
    graph_id: str,
    version: int | None = None,
    version: int | None,
    user_id: str | None,
    *,
    user_id: str | None = None,
    for_export: bool = False,
    include_subgraphs: bool = False,
    skip_access_check: bool = False,
@@ -908,26 +911,44 @@ async def get_graph(

    Returns `None` if the record is not found.
    """
    where_clause: AgentGraphWhereInput = {
        "id": graph_id,
    }
    graph = None

    if version is not None:
        where_clause["version"] = version
    # Only query the graph directly when it is owned by the user (or the access check is skipped)
    if skip_access_check or user_id is not None:
        graph_where_clause: AgentGraphWhereInput = {
            "id": graph_id,
        }
        if version is not None:
            graph_where_clause["version"] = version
        if not skip_access_check and user_id is not None:
            graph_where_clause["userId"] = user_id

        graph = await AgentGraph.prisma().find_first(
            where=graph_where_clause,
            include=AGENT_GRAPH_INCLUDE,
            order={"version": "desc"},
        )

    # Fall back to the store listing to find a graph the user doesn't own
    if graph is None:
        store_where_clause: StoreListingVersionWhereInput = {
            "agentGraphId": graph_id,
            "submissionStatus": SubmissionStatus.APPROVED,
            "isDeleted": False,
        }
        if version is not None:
            store_where_clause["agentGraphVersion"] = version

        if store_listing := await StoreListingVersion.prisma().find_first(
            where=store_where_clause,
            order={"agentGraphVersion": "desc"},
            include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
        ):
            graph = store_listing.AgentGraph

    graph = await AgentGraph.prisma().find_first(
        where=where_clause,
        include=AGENT_GRAPH_INCLUDE,
        order={"version": "desc"},
    )
    if graph is None:
        return None

    if not skip_access_check and graph.userId != user_id:
        # For access, the graph must be owned by the user or listed in the store
        if not await is_graph_published_in_marketplace(graph_id, graph.version):
            return None

    if include_subgraphs or for_export:
        sub_graphs = await get_sub_graphs(graph)
        return GraphModel.from_db(
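Given the reworked signature above (version and user_id now come before the keyword-only flags), a hedged sketch of a typical lookup — the IDs are placeholders:

# Hypothetical call, for illustration only:
graph = await get_graph("graph-789", version=None, user_id="user-123")
if graph is None:
    ...  # not owned by the user and not an approved store listing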
@@ -1097,6 +1118,28 @@ async def delete_graph(graph_id: str, user_id: str) -> int:
    return entries_count


async def get_graph_settings(user_id: str, graph_id: str) -> GraphSettings:
    lib = await LibraryAgent.prisma().find_first(
        where={
            "userId": user_id,
            "agentGraphId": graph_id,
            "isDeleted": False,
            "isArchived": False,
        },
        order={"agentGraphVersion": "desc"},
    )
    if not lib or not lib.settings:
        return GraphSettings()

    try:
        return GraphSettings.model_validate(lib.settings)
    except Exception:
        logger.warning(
            f"Malformed settings for LibraryAgent user={user_id} graph={graph_id}"
        )
        return GraphSettings()


async def validate_graph_execution_permissions(
    user_id: str, graph_id: str, graph_version: int, is_sub_graph: bool = False
) -> None:
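A hedged usage sketch of the settings lookup above — malformed or missing settings fall back to an all-defaults GraphSettings, so callers can read fields unconditionally:

# Hypothetical call, for illustration only:
settings = await get_graph_settings(user_id="user-123", graph_id="graph-789")
if settings.human_in_the_loop_safe_mode:
    ...  # route sensitive outputs through a pending human review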
258
autogpt_platform/backend/backend/data/human_review.py
Normal file
@@ -0,0 +1,258 @@
"""
Data layer for Human In The Loop (HITL) review operations.
Handles all database operations for pending human reviews.
"""

import asyncio
import logging
from datetime import datetime, timezone
from typing import Optional

from prisma.enums import ReviewStatus
from prisma.models import PendingHumanReview
from prisma.types import PendingHumanReviewUpdateInput
from pydantic import BaseModel

from backend.server.v2.executions.review.model import (
    PendingHumanReviewModel,
    SafeJsonData,
)
from backend.util.json import SafeJson

logger = logging.getLogger(__name__)


class ReviewResult(BaseModel):
    """Result of a review operation."""

    data: Optional[SafeJsonData] = None
    status: ReviewStatus
    message: str = ""
    processed: bool
    node_exec_id: str


async def get_or_create_human_review(
    user_id: str,
    node_exec_id: str,
    graph_exec_id: str,
    graph_id: str,
    graph_version: int,
    input_data: SafeJsonData,
    message: str,
    editable: bool,
) -> Optional[ReviewResult]:
    """
    Get the existing review or create a new pending review entry.

    Uses an upsert with an empty update payload to fetch the existing review or create a new one in a single operation.

    Args:
        user_id: ID of the user who owns this review
        node_exec_id: ID of the node execution
        graph_exec_id: ID of the graph execution
        graph_id: ID of the graph template
        graph_version: Version of the graph template
        input_data: The data to be reviewed
        message: Instructions for the reviewer
        editable: Whether the data can be edited

    Returns:
        ReviewResult if the review is complete, None if waiting for human input
    """
    try:
        logger.debug(f"Getting or creating review for node {node_exec_id}")

        # Upsert - get existing or create new review
        review = await PendingHumanReview.prisma().upsert(
            where={"nodeExecId": node_exec_id},
            data={
                "create": {
                    "userId": user_id,
                    "nodeExecId": node_exec_id,
                    "graphExecId": graph_exec_id,
                    "graphId": graph_id,
                    "graphVersion": graph_version,
                    "payload": SafeJson(input_data),
                    "instructions": message,
                    "editable": editable,
                    "status": ReviewStatus.WAITING,
                },
                "update": {},  # Do nothing on update - keep the existing review as-is
            },
        )

        logger.info(
            f"Review {'created' if review.createdAt == review.updatedAt else 'retrieved'} for node {node_exec_id} with status {review.status}"
        )
    except Exception as e:
        logger.error(
            f"Database error in get_or_create_human_review for node {node_exec_id}: {str(e)}"
        )
        raise

    # Early return if already processed
    if review.processed:
        return None

    # If pending, return None to continue waiting, otherwise return the review result
    if review.status == ReviewStatus.WAITING:
        return None
    else:
        return ReviewResult(
            data=review.payload,
            status=review.status,
            message=review.reviewMessage or "",
            processed=review.processed,
            node_exec_id=review.nodeExecId,
        )

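A hedged sketch of how a human-in-the-loop block might drive this helper — the caller shape and payload are illustrative, not the actual block implementation:

# Hypothetical caller, for illustration only:
result = await get_or_create_human_review(
    user_id=user_id,
    node_exec_id=node_exec_id,
    graph_exec_id=graph_exec_id,
    graph_id=graph_id,
    graph_version=graph_version,
    input_data={"draft": "Hello world"},
    message="Approve this draft before it is published.",
    editable=True,
)
if result is None:
    ...  # still WAITING (or already processed): pause the node until a decision lands
elif result.status == ReviewStatus.APPROVED:
    ...  # continue with result.data, which may have been edited by the reviewer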
async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
    """
    Check if a graph execution has any pending reviews.

    Args:
        graph_exec_id: The graph execution ID to check

    Returns:
        True if there are reviews waiting for human input, False otherwise
    """
    # Check if there are any reviews waiting for human input
    count = await PendingHumanReview.prisma().count(
        where={"graphExecId": graph_exec_id, "status": ReviewStatus.WAITING}
    )
    return count > 0


async def get_pending_reviews_for_user(
    user_id: str, page: int = 1, page_size: int = 25
) -> list["PendingHumanReviewModel"]:
    """
    Get all pending reviews for a user with pagination.

    Args:
        user_id: User ID to get reviews for
        page: Page number (1-indexed)
        page_size: Number of reviews per page

    Returns:
        List of pending review models
    """
    # Calculate offset for pagination
    offset = (page - 1) * page_size

    reviews = await PendingHumanReview.prisma().find_many(
        where={"userId": user_id, "status": ReviewStatus.WAITING},
        order={"createdAt": "desc"},
        skip=offset,
        take=page_size,
    )

    return [PendingHumanReviewModel.from_db(review) for review in reviews]


async def get_pending_reviews_for_execution(
    graph_exec_id: str, user_id: str
) -> list["PendingHumanReviewModel"]:
    """
    Get all pending reviews for a specific graph execution.

    Args:
        graph_exec_id: Graph execution ID
        user_id: User ID for security validation

    Returns:
        List of pending review models
    """
    reviews = await PendingHumanReview.prisma().find_many(
        where={
            "userId": user_id,
            "graphExecId": graph_exec_id,
            "status": ReviewStatus.WAITING,
        },
        order={"createdAt": "asc"},
    )

    return [PendingHumanReviewModel.from_db(review) for review in reviews]


async def process_all_reviews_for_execution(
    user_id: str,
    review_decisions: dict[str, tuple[ReviewStatus, SafeJsonData | None, str | None]],
) -> dict[str, PendingHumanReviewModel]:
    """Process all pending reviews for an execution with approve/reject decisions.

    Args:
        user_id: User ID for ownership validation
        review_decisions: Map of node_exec_id -> (status, reviewed_data, message)

    Returns:
        Dict of node_exec_id -> updated review model
    """
    if not review_decisions:
        return {}

    node_exec_ids = list(review_decisions.keys())

    # Get all reviews for validation
    reviews = await PendingHumanReview.prisma().find_many(
        where={
            "nodeExecId": {"in": node_exec_ids},
            "userId": user_id,
            "status": ReviewStatus.WAITING,
        },
    )

    # Validate all reviews can be processed
    if len(reviews) != len(node_exec_ids):
        missing_ids = set(node_exec_ids) - {review.nodeExecId for review in reviews}
        raise ValueError(
            f"Reviews not found, access denied, or not in WAITING status: {', '.join(missing_ids)}"
        )

    # Create parallel update tasks
    update_tasks = []

    for review in reviews:
        new_status, reviewed_data, message = review_decisions[review.nodeExecId]
        has_data_changes = reviewed_data is not None and reviewed_data != review.payload

        # Check edit permissions for actual data modifications
        if has_data_changes and not review.editable:
            raise ValueError(f"Review {review.nodeExecId} is not editable")

        update_data: PendingHumanReviewUpdateInput = {
            "status": new_status,
            "reviewMessage": message,
            "wasEdited": has_data_changes,
            "reviewedAt": datetime.now(timezone.utc),
        }

        if has_data_changes:
            update_data["payload"] = SafeJson(reviewed_data)

        task = PendingHumanReview.prisma().update(
            where={"nodeExecId": review.nodeExecId},
            data=update_data,
        )
        update_tasks.append(task)

    # Execute all updates in parallel and get updated reviews
    updated_reviews = await asyncio.gather(*update_tasks)

    # Note: Execution resumption is now handled at the API layer after ALL reviews
    # for an execution are processed (both approved and rejected)

    # Return as dict for easy access
    return {
        review.nodeExecId: PendingHumanReviewModel.from_db(review)
        for review in updated_reviews
    }
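For reference, a decision map in the shape this function expects — the node IDs and payloads are made up, but the tuple layout matches the signature and the tests below:

# Hypothetical input, for illustration only:
decisions = {
    "node-exec-1": (ReviewStatus.APPROVED, {"draft": "edited text"}, "Tightened wording"),
    "node-exec-2": (ReviewStatus.REJECTED, None, "Off-topic output"),
}
updated = await process_all_reviews_for_execution(
    user_id="user-123", review_decisions=decisions
)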
async def update_review_processed_status(node_exec_id: str, processed: bool) -> None:
    """Update the processed status of a review."""
    await PendingHumanReview.prisma().update(
        where={"nodeExecId": node_exec_id}, data={"processed": processed}
    )
342
autogpt_platform/backend/backend/data/human_review_test.py
Normal file
@@ -0,0 +1,342 @@
import datetime
from unittest.mock import AsyncMock, Mock

import pytest
import pytest_mock
from prisma.enums import ReviewStatus

from backend.data.human_review import (
    get_or_create_human_review,
    get_pending_reviews_for_execution,
    get_pending_reviews_for_user,
    has_pending_reviews_for_graph_exec,
    process_all_reviews_for_execution,
)


@pytest.fixture
def sample_db_review():
    """Create a sample database review object"""
    mock_review = Mock()
    mock_review.nodeExecId = "test_node_123"
    mock_review.userId = "test-user-123"
    mock_review.graphExecId = "test_graph_exec_456"
    mock_review.graphId = "test_graph_789"
    mock_review.graphVersion = 1
    mock_review.payload = {"data": "test payload"}
    mock_review.instructions = "Please review"
    mock_review.editable = True
    mock_review.status = ReviewStatus.WAITING
    mock_review.reviewMessage = None
    mock_review.wasEdited = False
    mock_review.processed = False
    mock_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    mock_review.updatedAt = None
    mock_review.reviewedAt = None
    return mock_review


@pytest.mark.asyncio
async def test_get_or_create_human_review_new(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test creating a new human review"""
    # Mock the upsert to return a new review (created_at == updated_at)
    sample_db_review.status = ReviewStatus.WAITING
    sample_db_review.processed = False

    mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)

    result = await get_or_create_human_review(
        user_id="test-user-123",
        node_exec_id="test_node_123",
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        input_data={"data": "test payload"},
        message="Please review",
        editable=True,
    )

    # Should return None for pending reviews (waiting for human input)
    assert result is None


@pytest.mark.asyncio
async def test_get_or_create_human_review_approved(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test retrieving an already approved review"""
    # Set up review as already approved
    sample_db_review.status = ReviewStatus.APPROVED
    sample_db_review.processed = False
    sample_db_review.reviewMessage = "Looks good"

    mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)

    result = await get_or_create_human_review(
        user_id="test-user-123",
        node_exec_id="test_node_123",
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        input_data={"data": "test payload"},
        message="Please review",
        editable=True,
    )

    # Should return the approved result
    assert result is not None
    assert result.status == ReviewStatus.APPROVED
    assert result.data == {"data": "test payload"}
    assert result.message == "Looks good"


@pytest.mark.asyncio
async def test_has_pending_reviews_for_graph_exec_true(
    mocker: pytest_mock.MockFixture,
):
    """Test when there are pending reviews"""
    mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_count.return_value.count = AsyncMock(return_value=2)

    result = await has_pending_reviews_for_graph_exec("test_graph_exec")

    assert result is True


@pytest.mark.asyncio
async def test_has_pending_reviews_for_graph_exec_false(
    mocker: pytest_mock.MockFixture,
):
    """Test when there are no pending reviews"""
    mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_count.return_value.count = AsyncMock(return_value=0)

    result = await has_pending_reviews_for_graph_exec("test_graph_exec")

    assert result is False


@pytest.mark.asyncio
async def test_get_pending_reviews_for_user(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test getting pending reviews for a user with pagination"""
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    result = await get_pending_reviews_for_user("test_user", page=2, page_size=10)

    assert len(result) == 1
    assert result[0].node_exec_id == "test_node_123"

    # Verify pagination parameters
    call_args = mock_find_many.return_value.find_many.call_args
    assert call_args.kwargs["skip"] == 10  # (page-1) * page_size = (2-1) * 10
    assert call_args.kwargs["take"] == 10


@pytest.mark.asyncio
async def test_get_pending_reviews_for_execution(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test getting pending reviews for specific execution"""
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    result = await get_pending_reviews_for_execution(
        "test_graph_exec_456", "test-user-123"
    )

    assert len(result) == 1
    assert result[0].graph_exec_id == "test_graph_exec_456"

    # Verify it filters by execution and user
    call_args = mock_find_many.return_value.find_many.call_args
    where_clause = call_args.kwargs["where"]
    assert where_clause["userId"] == "test-user-123"
    assert where_clause["graphExecId"] == "test_graph_exec_456"
    assert where_clause["status"] == ReviewStatus.WAITING


@pytest.mark.asyncio
async def test_process_all_reviews_for_execution_success(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test successful processing of reviews for an execution"""
    # Mock finding reviews
    mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_prisma.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    # Mock updating reviews
    updated_review = Mock()
    updated_review.nodeExecId = "test_node_123"
    updated_review.userId = "test-user-123"
    updated_review.graphExecId = "test_graph_exec_456"
    updated_review.graphId = "test_graph_789"
    updated_review.graphVersion = 1
    updated_review.payload = {"data": "modified"}
    updated_review.instructions = "Please review"
    updated_review.editable = True
    updated_review.status = ReviewStatus.APPROVED
    updated_review.reviewMessage = "Approved"
    updated_review.wasEdited = True
    updated_review.processed = False
    updated_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    updated_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
    updated_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
    mock_prisma.return_value.update = AsyncMock(return_value=updated_review)

    # Mock gather to simulate parallel updates
    mocker.patch(
        "backend.data.human_review.asyncio.gather",
        new=AsyncMock(return_value=[updated_review]),
    )

    result = await process_all_reviews_for_execution(
        user_id="test-user-123",
        review_decisions={
            "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved")
        },
    )

    assert len(result) == 1
    assert "test_node_123" in result
    assert result["test_node_123"].status == ReviewStatus.APPROVED


@pytest.mark.asyncio
async def test_process_all_reviews_for_execution_validation_errors(
    mocker: pytest_mock.MockFixture,
):
    """Test validation errors in process_all_reviews_for_execution"""
    # Mock finding fewer reviews than requested (some not found)
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(
        return_value=[]
    )  # No reviews found

    with pytest.raises(ValueError, match="Reviews not found"):
        await process_all_reviews_for_execution(
            user_id="test-user-123",
            review_decisions={
                "nonexistent_node": (ReviewStatus.APPROVED, {"data": "test"}, "message")
            },
        )


@pytest.mark.asyncio
async def test_process_all_reviews_edit_permission_error(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test editing non-editable review"""
    # Set review as non-editable
    sample_db_review.editable = False

    # Mock finding reviews
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    with pytest.raises(ValueError, match="not editable"):
        await process_all_reviews_for_execution(
            user_id="test-user-123",
            review_decisions={
                "test_node_123": (
                    ReviewStatus.APPROVED,
                    {"data": "modified"},
                    "message",
                )
            },
        )


@pytest.mark.asyncio
async def test_process_all_reviews_mixed_approval_rejection(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test processing mixed approval and rejection decisions"""
    # Create second review for rejection
    second_review = Mock()
    second_review.nodeExecId = "test_node_456"
    second_review.userId = "test-user-123"
    second_review.graphExecId = "test_graph_exec_456"
    second_review.graphId = "test_graph_789"
    second_review.graphVersion = 1
    second_review.payload = {"data": "original"}
    second_review.instructions = "Second review"
    second_review.editable = True
    second_review.status = ReviewStatus.WAITING
    second_review.reviewMessage = None
    second_review.wasEdited = False
    second_review.processed = False
    second_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    second_review.updatedAt = None
    second_review.reviewedAt = None

    # Mock finding reviews
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(
        return_value=[sample_db_review, second_review]
    )

    # Mock updating reviews
    approved_review = Mock()
    approved_review.nodeExecId = "test_node_123"
    approved_review.userId = "test-user-123"
    approved_review.graphExecId = "test_graph_exec_456"
    approved_review.graphId = "test_graph_789"
    approved_review.graphVersion = 1
    approved_review.payload = {"data": "modified"}
    approved_review.instructions = "Please review"
    approved_review.editable = True
    approved_review.status = ReviewStatus.APPROVED
    approved_review.reviewMessage = "Approved"
    approved_review.wasEdited = True
    approved_review.processed = False
    approved_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    approved_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
    approved_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)

    rejected_review = Mock()
    rejected_review.nodeExecId = "test_node_456"
    rejected_review.userId = "test-user-123"
    rejected_review.graphExecId = "test_graph_exec_456"
    rejected_review.graphId = "test_graph_789"
    rejected_review.graphVersion = 1
    rejected_review.payload = {"data": "original"}
    rejected_review.instructions = "Please review"
    rejected_review.editable = True
    rejected_review.status = ReviewStatus.REJECTED
    rejected_review.reviewMessage = "Rejected"
    rejected_review.wasEdited = False
    rejected_review.processed = False
    rejected_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    rejected_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
    rejected_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)

    mocker.patch(
        "backend.data.human_review.asyncio.gather",
        new=AsyncMock(return_value=[approved_review, rejected_review]),
    )

    result = await process_all_reviews_for_execution(
        user_id="test-user-123",
        review_decisions={
            "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved"),
            "test_node_456": (ReviewStatus.REJECTED, None, "Rejected"),
        },
    )

    assert len(result) == 2
    assert "test_node_123" in result
    assert "test_node_456" in result
@@ -1,5 +1,5 @@
import logging
from typing import AsyncGenerator, Literal, Optional, overload
from typing import TYPE_CHECKING, AsyncGenerator, Literal, Optional, overload

from prisma.models import AgentNode, AgentPreset, IntegrationWebhook
from prisma.types import (
@@ -19,10 +19,12 @@ from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks import get_webhook_manager
from backend.integrations.webhooks.utils import webhook_ingress_url
from backend.server.v2.library.model import LibraryAgentPreset
from backend.util.exceptions import NotFoundError
from backend.util.json import SafeJson

if TYPE_CHECKING:
    from backend.server.v2.library.model import LibraryAgentPreset

from .db import BaseDbModel
from .graph import NodeModel

@@ -64,7 +66,7 @@ class Webhook(BaseDbModel):

class WebhookWithRelations(Webhook):
    triggered_nodes: list[NodeModel]
    triggered_presets: list[LibraryAgentPreset]
    triggered_presets: list["LibraryAgentPreset"]

    @staticmethod
    def from_db(webhook: IntegrationWebhook):
@@ -73,6 +75,12 @@ class WebhookWithRelations(Webhook):
            "AgentNodes and AgentPresets must be included in "
            "IntegrationWebhook query with relations"
        )
        # LibraryAgentPreset import is moved to TYPE_CHECKING to avoid circular import:
        # integrations.py → library/model.py → integrations.py (for Webhook)
        # Runtime import is used in WebhookWithRelations.from_db() method instead
        # Import at runtime to avoid circular dependency
        from backend.server.v2.library.model import LibraryAgentPreset

        return WebhookWithRelations(
            **Webhook.from_db(webhook).model_dump(),
            triggered_nodes=[NodeModel.from_db(node) for node in webhook.AgentNodes],

@@ -22,7 +22,7 @@ from typing import (
from urllib.parse import urlparse
from uuid import uuid4

from prisma.enums import CreditTransactionType
from prisma.enums import CreditTransactionType, OnboardingStep
from pydantic import (
    BaseModel,
    ConfigDict,
@@ -46,6 +46,7 @@ from backend.util.settings import Secrets

# Type alias for any provider name (including custom ones)
AnyProviderName = str  # Will be validated as ProviderName at runtime
USER_TIMEZONE_NOT_SET = "not-set"


class User(BaseModel):
@@ -98,7 +99,7 @@ class User(BaseModel):

    # User timezone for scheduling and time display
    timezone: str = Field(
        default="not-set",
        default=USER_TIMEZONE_NOT_SET,
        description="User timezone (IANA timezone identifier or 'not-set')",
    )

@@ -155,7 +156,7 @@ class User(BaseModel):
        notify_on_daily_summary=prisma_user.notifyOnDailySummary or True,
        notify_on_weekly_summary=prisma_user.notifyOnWeeklySummary or True,
        notify_on_monthly_summary=prisma_user.notifyOnMonthlySummary or True,
        timezone=prisma_user.timezone or "not-set",
        timezone=prisma_user.timezone or USER_TIMEZONE_NOT_SET,
    )


@@ -433,6 +434,18 @@ class OAuthState(BaseModel):
    code_verifier: Optional[str] = None
    """Unix timestamp (seconds) indicating when this OAuth state expires"""
    scopes: list[str]
    # Fields for external API OAuth flows
    callback_url: Optional[str] = None
    """External app's callback URL for OAuth redirect"""
    state_metadata: dict[str, Any] = Field(default_factory=dict)
    """Metadata to echo back to external app on completion"""
    initiated_by_api_key_id: Optional[str] = None
    """ID of the API key that initiated this OAuth flow"""

    @property
    def is_external(self) -> bool:
        """Whether this OAuth flow was initiated via external API."""
        return self.callback_url is not None


class UserMetadata(BaseModel):
@@ -855,3 +868,20 @@ class UserExecutionSummaryStats(BaseModel):
    total_execution_time: float = Field(default=0)
    average_execution_time: float = Field(default=0)
    cost_breakdown: dict[str, float] = Field(default_factory=dict)


class UserOnboarding(BaseModel):
    userId: str
    completedSteps: list[OnboardingStep]
    walletShown: bool
    notified: list[OnboardingStep]
    rewardedFor: list[OnboardingStep]
    usageReason: Optional[str]
    integrations: list[str]
    otherIntegrations: Optional[str]
    selectedStoreListingVersionId: Optional[str]
    agentInput: Optional[dict[str, Any]]
    onboardingAgentExecutionId: Optional[str]
    agentRuns: int
    lastRunAt: Optional[datetime]
    consecutiveRunDays: int

@@ -2,7 +2,7 @@ from __future__ import annotations

from typing import AsyncGenerator

from pydantic import BaseModel
from pydantic import BaseModel, field_serializer

from backend.data.event_bus import AsyncRedisEventBus
from backend.server.model import NotificationPayload
@@ -15,6 +15,11 @@ class NotificationEvent(BaseModel):
    user_id: str
    payload: NotificationPayload

    @field_serializer("payload")
    def serialize_payload(self, payload: NotificationPayload):
        """Ensure extra fields survive Redis serialization."""
        return payload.model_dump()


class AsyncRedisNotificationEventBus(AsyncRedisEventBus[NotificationEvent]):
    Model = NotificationEvent  # type: ignore

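A hedged note on why this serializer exists: Pydantic v2 serializes a nested model according to its declared field type, so fields that only exist on a payload subclass would otherwise be dropped when the event crosses Redis. Sketch, with OnboardingNotificationPayload (used later in onboarding.py) and its kwargs assumed from that call site:

# Hypothetical illustration, not part of the diff:
payload = OnboardingNotificationPayload(type="onboarding", event="step_completed", step=None)
event = NotificationEvent(user_id="user-123", payload=payload)
# Without serialize_payload(), model_dump() would serialize `payload` as a plain
# NotificationPayload and drop subclass-only fields like `event` and `step`.
data = event.model_dump()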
@@ -1,6 +1,7 @@
import re
from datetime import datetime
from typing import Any, Optional
from datetime import datetime, timedelta, timezone
from typing import Any, Literal, Optional
from zoneinfo import ZoneInfo

import prisma
import pydantic
@@ -8,12 +9,18 @@ from prisma.enums import OnboardingStep
from prisma.models import UserOnboarding
from prisma.types import UserOnboardingCreateInput, UserOnboardingUpdateInput

from backend.data.block import get_blocks
from backend.data import execution as execution_db
from backend.data.credit import get_user_credit_model
from backend.data.model import CredentialsMetaInput
from backend.data.notification_bus import (
    AsyncRedisNotificationEventBus,
    NotificationEvent,
)
from backend.data.user import get_user_by_id
from backend.server.model import OnboardingNotificationPayload
from backend.server.v2.store.model import StoreAgentDetails
from backend.util.cache import cached
from backend.util.json import SafeJson
from backend.util.timezone_utils import get_user_timezone_or_utc

# Mapping from user reason id to the categories to search when choosing which agent to show
REASON_MAPPING: dict[str, list[str]] = {
@@ -26,9 +33,20 @@ REASON_MAPPING: dict[str, list[str]] = {
POINTS_AGENT_COUNT = 50  # Number of agents to calculate points for
MIN_AGENT_COUNT = 2  # Minimum number of marketplace agents to enable onboarding

FrontendOnboardingStep = Literal[
    OnboardingStep.WELCOME,
    OnboardingStep.USAGE_REASON,
    OnboardingStep.INTEGRATIONS,
    OnboardingStep.AGENT_CHOICE,
    OnboardingStep.AGENT_NEW_RUN,
    OnboardingStep.AGENT_INPUT,
    OnboardingStep.CONGRATS,
    OnboardingStep.MARKETPLACE_VISIT,
    OnboardingStep.BUILDER_OPEN,
]


class UserOnboardingUpdate(pydantic.BaseModel):
    completedSteps: Optional[list[OnboardingStep]] = None
    walletShown: Optional[bool] = None
    notified: Optional[list[OnboardingStep]] = None
    usageReason: Optional[str] = None
@@ -37,9 +55,6 @@ class UserOnboardingUpdate(pydantic.BaseModel):
    selectedStoreListingVersionId: Optional[str] = None
    agentInput: Optional[dict[str, Any]] = None
    onboardingAgentExecutionId: Optional[str] = None
    agentRuns: Optional[int] = None
    lastRunAt: Optional[datetime] = None
    consecutiveRunDays: Optional[int] = None


async def get_user_onboarding(user_id: str):
@@ -78,26 +93,6 @@ async def reset_user_onboarding(user_id: str):
async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
    update: UserOnboardingUpdateInput = {}
    onboarding = await get_user_onboarding(user_id)
    if data.completedSteps is not None:
        update["completedSteps"] = list(
            set(data.completedSteps + onboarding.completedSteps)
        )
        for step in (
            OnboardingStep.AGENT_NEW_RUN,
            OnboardingStep.MARKETPLACE_VISIT,
            OnboardingStep.MARKETPLACE_ADD_AGENT,
            OnboardingStep.MARKETPLACE_RUN_AGENT,
            OnboardingStep.BUILDER_SAVE_AGENT,
            OnboardingStep.RE_RUN_AGENT,
            OnboardingStep.SCHEDULE_AGENT,
            OnboardingStep.RUN_AGENTS,
            OnboardingStep.RUN_3_DAYS,
            OnboardingStep.TRIGGER_WEBHOOK,
            OnboardingStep.RUN_14_DAYS,
            OnboardingStep.RUN_AGENTS_100,
        ):
            if step in data.completedSteps:
                await reward_user(user_id, step, onboarding)
    if data.walletShown:
        update["walletShown"] = data.walletShown
    if data.notified is not None:
@@ -114,12 +109,6 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
        update["agentInput"] = SafeJson(data.agentInput)
    if data.onboardingAgentExecutionId is not None:
        update["onboardingAgentExecutionId"] = data.onboardingAgentExecutionId
    if data.agentRuns is not None and data.agentRuns > onboarding.agentRuns:
        update["agentRuns"] = data.agentRuns
    if data.lastRunAt is not None:
        update["lastRunAt"] = data.lastRunAt
    if data.consecutiveRunDays is not None:
        update["consecutiveRunDays"] = data.consecutiveRunDays

    return await UserOnboarding.prisma().upsert(
        where={"userId": user_id},
@@ -130,7 +119,7 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
    )


async def reward_user(user_id: str, step: OnboardingStep, onboarding: UserOnboarding):
async def _reward_user(user_id: str, onboarding: UserOnboarding, step: OnboardingStep):
    reward = 0
    match step:
        # Reward user when they clicked New Run during onboarding
@@ -168,35 +157,66 @@ async def reward_user(user_id: str, step: OnboardingStep, onboar
    if step in onboarding.rewardedFor:
        return

    onboarding.rewardedFor.append(step)
    user_credit_model = await get_user_credit_model(user_id)
    await user_credit_model.onboarding_reward(user_id, reward, step)
    await UserOnboarding.prisma().update(
        where={"userId": user_id},
        data={
            "completedSteps": list(set(onboarding.completedSteps + [step])),
            "rewardedFor": onboarding.rewardedFor,
            "rewardedFor": list(set(onboarding.rewardedFor + [step])),
        },
    )


async def complete_webhook_trigger_step(user_id: str):
async def complete_onboarding_step(user_id: str, step: OnboardingStep):
    """
    Completes the TRIGGER_WEBHOOK onboarding step for the user if not already completed.
    Completes the specified onboarding step for the user if not already completed.
    """
    onboarding = await get_user_onboarding(user_id)
    if OnboardingStep.TRIGGER_WEBHOOK not in onboarding.completedSteps:
        await update_user_onboarding(
            user_id,
            UserOnboardingUpdate(
                completedSteps=onboarding.completedSteps
                + [OnboardingStep.TRIGGER_WEBHOOK]
            ),
    if step not in onboarding.completedSteps:
        await UserOnboarding.prisma().update(
            where={"userId": user_id},
            data={
                "completedSteps": list(set(onboarding.completedSteps + [step])),
            },
        )
        await _reward_user(user_id, onboarding, step)
        await _send_onboarding_notification(user_id, step)


def clean_and_split(text: str) -> list[str]:
async def _send_onboarding_notification(
    user_id: str, step: OnboardingStep | None, event: str = "step_completed"
):
    """
    Sends an onboarding notification to the user.
    """
    payload = OnboardingNotificationPayload(
        type="onboarding",
        event=event,
        step=step,
    )
    await AsyncRedisNotificationEventBus().publish(
        NotificationEvent(user_id=user_id, payload=payload)
    )


async def complete_re_run_agent(user_id: str, graph_id: str) -> None:
    """
    Complete RE_RUN_AGENT step when a user runs a graph they've run before.
    Keeps overhead low by only counting executions if the step is still pending.
    """
    onboarding = await get_user_onboarding(user_id)
    if OnboardingStep.RE_RUN_AGENT in onboarding.completedSteps:
        return

    # Includes current execution, so count > 1 means there was at least one prior run.
    previous_exec_count = await execution_db.get_graph_executions_count(
        user_id=user_id, graph_id=graph_id
    )
    if previous_exec_count > 1:
        await complete_onboarding_step(user_id, OnboardingStep.RE_RUN_AGENT)


def _clean_and_split(text: str) -> list[str]:
    """
    Removes all special characters from a string, truncates it to 100 characters,
    and splits it by whitespace and commas.
@@ -219,7 +239,7 @@ def clean_and_split(text: str) -> list[str]:
    return words


def calculate_points(
def _calculate_points(
    agent, categories: list[str], custom: list[str], integrations: list[str]
) -> int:
    """
@@ -263,18 +283,85 @@ def calculate_points(
    return int(points)


def get_credentials_blocks() -> dict[str, str]:
    # Returns a dictionary of block id to credentials field name
    creds: dict[str, str] = {}
    blocks = get_blocks()
    for id, block in blocks.items():
        for field_name, field_info in block().input_schema.model_fields.items():
            if field_info.annotation == CredentialsMetaInput:
                creds[id] = field_name
    return creds
def _normalize_datetime(value: datetime | None) -> datetime | None:
    if value is None:
        return None
    if value.tzinfo is None:
        return value.replace(tzinfo=timezone.utc)
    return value.astimezone(timezone.utc)


CREDENTIALS_FIELDS: dict[str, str] = get_credentials_blocks()
def _calculate_consecutive_run_days(
    last_run_at: datetime | None, current_consecutive_days: int, user_timezone: str
) -> tuple[datetime, int]:
    tz = ZoneInfo(user_timezone)
    local_now = datetime.now(tz)
    normalized_last_run = _normalize_datetime(last_run_at)

    if normalized_last_run is None:
        return local_now.astimezone(timezone.utc), 1

    last_run_local = normalized_last_run.astimezone(tz)
    last_run_date = last_run_local.date()
    today = local_now.date()

    if last_run_date == today:
        return local_now.astimezone(timezone.utc), current_consecutive_days

    if last_run_date == today - timedelta(days=1):
        return local_now.astimezone(timezone.utc), current_consecutive_days + 1

    return local_now.astimezone(timezone.utc), 1

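A worked example of the streak logic above, under the assumption that "now" falls on 2024-06-02 in the user's timezone; the values are hypothetical:

# Hypothetical walkthrough, for illustration only:
last_run = datetime(2024, 6, 1, 23, 30, tzinfo=ZoneInfo("America/New_York"))
_, streak = _calculate_consecutive_run_days(last_run, 3, "America/New_York")
# The last run was "yesterday" in local time, so the streak increments to 4;
# a same-day run would keep it at 3, and a gap of 2+ local days resets it to 1.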
def _get_run_milestone_steps(
    new_run_count: int, consecutive_days: int
) -> list[OnboardingStep]:
    milestones: list[OnboardingStep] = []
    if new_run_count >= 10:
        milestones.append(OnboardingStep.RUN_AGENTS)
    if new_run_count >= 100:
        milestones.append(OnboardingStep.RUN_AGENTS_100)
    if consecutive_days >= 3:
        milestones.append(OnboardingStep.RUN_3_DAYS)
    if consecutive_days >= 14:
        milestones.append(OnboardingStep.RUN_14_DAYS)
    return milestones


async def _get_user_timezone(user_id: str) -> str:
    user = await get_user_by_id(user_id)
    return get_user_timezone_or_utc(user.timezone if user else None)


async def increment_runs(user_id: str):
    """
    Increment a user's run counters and trigger any onboarding milestones.
    """
    user_timezone = await _get_user_timezone(user_id)
    onboarding = await get_user_onboarding(user_id)
    new_run_count = onboarding.agentRuns + 1
    last_run_at, consecutive_run_days = _calculate_consecutive_run_days(
        onboarding.lastRunAt, onboarding.consecutiveRunDays, user_timezone
    )

    await UserOnboarding.prisma().update(
        where={"userId": user_id},
        data={
            "agentRuns": {"increment": 1},
            "lastRunAt": last_run_at,
            "consecutiveRunDays": consecutive_run_days,
        },
    )

    milestones = _get_run_milestone_steps(new_run_count, consecutive_run_days)
    new_steps = [step for step in milestones if step not in onboarding.completedSteps]

    for step in new_steps:
        await complete_onboarding_step(user_id, step)
    # If no new steps were completed, still send a progress notification so the client refetches onboarding state
    if not new_steps:
        await _send_onboarding_notification(user_id, None, event="increment_runs")


async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
@@ -283,7 +370,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:

    where_clause: dict[str, Any] = {}

    custom = clean_and_split((user_onboarding.usageReason or "").lower())
    custom = _clean_and_split((user_onboarding.usageReason or "").lower())

    if categories:
        where_clause["OR"] = [
@@ -331,7 +418,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
    # Calculate points for the first X agents and choose the top 2
    agent_points = []
    for agent in storeAgents[:POINTS_AGENT_COUNT]:
        points = calculate_points(
        points = _calculate_points(
            agent, categories, custom, user_onboarding.integrations
        )
        agent_points.append((agent, points))
@@ -345,6 +432,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
        slug=agent.slug,
        agent_name=agent.agent_name,
        agent_video=agent.agent_video or "",
        agent_output_demo=agent.agent_output_demo or "",
        agent_image=agent.agent_image,
        creator=agent.creator_username,
        creator_avatar=agent.creator_avatar,

@@ -27,6 +27,101 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


# Default system prompt template for activity status generation
DEFAULT_SYSTEM_PROMPT = """You are an AI assistant analyzing what an agent execution accomplished and whether it worked correctly.
You need to provide both a user-friendly summary AND a correctness assessment.

FOR THE ACTIVITY STATUS:
- Write from the user's perspective about what they accomplished, NOT about technical execution details
- Focus on the ACTUAL TASK the user wanted done, not the internal workflow steps
- Avoid technical terms like 'workflow', 'execution', 'components', 'nodes', 'processing', etc.
- Keep it to 3 sentences maximum. Be conversational and human-friendly

FOR THE CORRECTNESS SCORE:
- Provide a score from 0.0 to 1.0 indicating how well the execution achieved its intended purpose
- Use this scoring guide:
  0.0-0.2: Failure - The result clearly did not meet the task requirements
  0.2-0.4: Poor - Major issues; only small parts of the goal were achieved
  0.4-0.6: Partial Success - Some objectives met, but with noticeable gaps or inaccuracies
  0.6-0.8: Mostly Successful - Largely achieved the intended outcome, with minor flaws
  0.8-1.0: Success - Fully met or exceeded the task requirements
- Base the score on actual outputs produced, not just technical completion

UNDERSTAND THE INTENDED PURPOSE:
- FIRST: Read the graph description carefully to understand what the user wanted to accomplish
- The graph name and description tell you the main goal/intention of this automation
- Use this intended purpose as your PRIMARY criteria for success/failure evaluation
- Ask yourself: 'Did this execution actually accomplish what the graph was designed to do?'

CRITICAL OUTPUT ANALYSIS:
- Check if blocks that should produce user-facing results actually produced outputs
- Blocks with names containing 'Output', 'Post', 'Create', 'Send', 'Publish', 'Generate' are usually meant to produce final results
- If these critical blocks have NO outputs (empty recent_outputs), the task likely FAILED even if status shows 'completed'
- Sub-agents (AgentExecutorBlock) that produce no outputs usually indicate failed sub-tasks
- Most importantly: Does the execution result match what the graph description promised to deliver?

SUCCESS EVALUATION BASED ON INTENTION:
- If the graph is meant to 'create blog posts' → check if blog content was actually created
- If the graph is meant to 'send emails' → check if emails were actually sent
- If the graph is meant to 'analyze data' → check if analysis results were produced
- If the graph is meant to 'generate reports' → check if reports were generated
- Technical completion ≠ goal achievement. Focus on whether the USER'S INTENDED OUTCOME was delivered

IMPORTANT: Be HONEST about what actually happened:
- If the input was invalid/nonsensical, say so directly
- If the task failed, explain what went wrong in simple terms
- If errors occurred, focus on what the user needs to know
- Only claim success if the INTENDED PURPOSE was genuinely accomplished AND produced expected outputs
- Don't sugar-coat failures or present them as helpful feedback
- ESPECIALLY: If the graph's main purpose wasn't achieved, this is a failure regardless of 'completed' status

Understanding Errors:
- Node errors: Individual steps may fail but the overall task might still complete (e.g., one data source fails but others work)
- Graph error (in overall_status.graph_error): This means the entire execution failed and nothing was accomplished
- Missing outputs from critical blocks: Even if no errors, this means the task failed to produce expected results
- Focus on whether the graph's intended purpose was fulfilled, not whether technical steps completed"""

# Default user prompt template for activity status generation
DEFAULT_USER_PROMPT = """A user ran '{{GRAPH_NAME}}' to accomplish something. Based on this execution data,
provide both an activity summary and correctness assessment:

{{EXECUTION_DATA}}

ANALYSIS CHECKLIST:
1. READ graph_info.description FIRST - this tells you what the user intended to accomplish
2. Check overall_status.graph_error - if present, the entire execution failed
3. Look for nodes with 'Output', 'Post', 'Create', 'Send', 'Publish', 'Generate' in their block_name
4. Check if these critical blocks have empty recent_outputs arrays - this indicates failure
5. Look for AgentExecutorBlock (sub-agents) with no outputs - this suggests sub-task failures
6. Count how many nodes produced outputs vs total nodes - low ratio suggests problems
7. MOST IMPORTANT: Does the execution outcome match what graph_info.description promised?

INTENTION-BASED EVALUATION:
- If description mentions 'blog writing' → did it create blog content?
- If description mentions 'email automation' → were emails actually sent?
- If description mentions 'data analysis' → were analysis results produced?
- If description mentions 'content generation' → was content actually generated?
- If description mentions 'social media posting' → were posts actually made?
- Match the outputs to the stated intention, not just technical completion

PROVIDE:
activity_status: 1-3 sentences about what the user accomplished, such as:
- 'I analyzed your resume and provided detailed feedback for the IT industry.'
- 'I couldn't complete the task because critical steps failed to produce any results.'
- 'I failed to generate the content you requested due to missing API access.'
- 'I extracted key information from your documents and organized it into a summary.'
- 'The task failed because the blog post creation step didn't produce any output.'

correctness_score: A float score from 0.0 to 1.0 based on how well the intended purpose was achieved:
- 0.0-0.2: Failure (didn't meet requirements)
- 0.2-0.4: Poor (major issues, minimal achievement)
- 0.4-0.6: Partial Success (some objectives met with gaps)
- 0.6-0.8: Mostly Successful (largely achieved with minor flaws)
- 0.8-1.0: Success (fully met or exceeded requirements)

BE CRITICAL: If the graph's intended purpose (from description) wasn't achieved, use a low score (0.0-0.4) even if status is 'completed'."""


class ErrorInfo(TypedDict):
    """Type definition for error information."""

@@ -93,6 +188,9 @@ async def generate_activity_status_for_execution(
    execution_status: ExecutionStatus | None = None,
    model_name: str = "gpt-4o-mini",
    skip_feature_flag: bool = False,
    system_prompt: str = DEFAULT_SYSTEM_PROMPT,
    user_prompt: str = DEFAULT_USER_PROMPT,
    skip_existing: bool = True,
) -> ActivityStatusResponse | None:
    """
    Generate an AI-based activity status summary and correctness assessment for a graph execution.
@@ -108,10 +206,15 @@ async def generate_activity_status_for_execution(
        db_client: Database client for fetching data
        user_id: User ID for LaunchDarkly feature flag evaluation
        execution_status: The overall execution status (COMPLETED, FAILED, TERMINATED)
        model_name: AI model to use for generation (default: gpt-4o-mini)
        skip_feature_flag: Whether to skip LaunchDarkly feature flag check
        system_prompt: Custom system prompt template (default: DEFAULT_SYSTEM_PROMPT)
        user_prompt: Custom user prompt template with placeholders (default: DEFAULT_USER_PROMPT)
        skip_existing: Whether to skip if activity_status and correctness_score already exist

    Returns:
        AI-generated activity status response with activity_status and correctness_status,
        or None if feature is disabled
        or None if feature is disabled or skipped
    """
    # Check LaunchDarkly feature flag for AI activity status generation with full context support
    if not skip_feature_flag and not await is_feature_enabled(
@@ -120,6 +223,20 @@ async def generate_activity_status_for_execution(
        logger.debug("AI activity status generation is disabled via LaunchDarkly")
        return None

    # Check if we should skip existing data (for admin regeneration option)
    if (
        skip_existing
        and execution_stats.activity_status
        and execution_stats.correctness_score is not None
    ):
        logger.debug(
            f"Skipping activity status generation for {graph_exec_id}: already exists"
        )
        return {
            "activity_status": execution_stats.activity_status,
            "correctness_score": execution_stats.correctness_score,
        }

    # Check if we have OpenAI API key
    try:
        settings = Settings()
@@ -136,7 +253,12 @@ async def generate_activity_status_for_execution(

    # Get graph metadata and full graph structure for name, description, and links
    graph_metadata = await db_client.get_graph_metadata(graph_id, graph_version)
    graph = await db_client.get_graph(graph_id, graph_version)
    graph = await db_client.get_graph(
        graph_id=graph_id,
        version=graph_version,
        user_id=user_id,
        skip_access_check=True,
    )

    graph_name = graph_metadata.name if graph_metadata else f"Graph {graph_id}"
    graph_description = graph_metadata.description if graph_metadata else ""
@@ -152,94 +274,23 @@ async def generate_activity_status_for_execution(
        execution_status,
    )

    # Prepare execution data as JSON for template substitution
    execution_data_json = json.dumps(execution_data, indent=2)

    # Perform template substitution for user prompt
    user_prompt_content = user_prompt.replace("{{GRAPH_NAME}}", graph_name).replace(
        "{{EXECUTION_DATA}}", execution_data_json
    )

    # Prepare prompt for AI with structured output requirements
    prompt = [
        {
            "role": "system",
            "content": (
                "You are an AI assistant analyzing what an agent execution accomplished and whether it worked correctly. "
                "You need to provide both a user-friendly summary AND a correctness assessment.\n\n"
                "FOR THE ACTIVITY STATUS:\n"
                "- Write from the user's perspective about what they accomplished, NOT about technical execution details\n"
                "- Focus on the ACTUAL TASK the user wanted done, not the internal workflow steps\n"
                "- Avoid technical terms like 'workflow', 'execution', 'components', 'nodes', 'processing', etc.\n"
                "- Keep it to 3 sentences maximum. Be conversational and human-friendly\n\n"
                "FOR THE CORRECTNESS SCORE:\n"
                "- Provide a score from 0.0 to 1.0 indicating how well the execution achieved its intended purpose\n"
                "- Use this scoring guide:\n"
                "  0.0-0.2: Failure - The result clearly did not meet the task requirements\n"
                "  0.2-0.4: Poor - Major issues; only small parts of the goal were achieved\n"
                "  0.4-0.6: Partial Success - Some objectives met, but with noticeable gaps or inaccuracies\n"
                "  0.6-0.8: Mostly Successful - Largely achieved the intended outcome, with minor flaws\n"
                "  0.8-1.0: Success - Fully met or exceeded the task requirements\n"
                "- Base the score on actual outputs produced, not just technical completion\n\n"
                "UNDERSTAND THE INTENDED PURPOSE:\n"
                "- FIRST: Read the graph description carefully to understand what the user wanted to accomplish\n"
                "- The graph name and description tell you the main goal/intention of this automation\n"
                "- Use this intended purpose as your PRIMARY criteria for success/failure evaluation\n"
                "- Ask yourself: 'Did this execution actually accomplish what the graph was designed to do?'\n\n"
                "CRITICAL OUTPUT ANALYSIS:\n"
                "- Check if blocks that should produce user-facing results actually produced outputs\n"
                "- Blocks with names containing 'Output', 'Post', 'Create', 'Send', 'Publish', 'Generate' are usually meant to produce final results\n"
                "- If these critical blocks have NO outputs (empty recent_outputs), the task likely FAILED even if status shows 'completed'\n"
                "- Sub-agents (AgentExecutorBlock) that produce no outputs usually indicate failed sub-tasks\n"
                "- Most importantly: Does the execution result match what the graph description promised to deliver?\n\n"
                "SUCCESS EVALUATION BASED ON INTENTION:\n"
                "- If the graph is meant to 'create blog posts' → check if blog content was actually created\n"
                "- If the graph is meant to 'send emails' → check if emails were actually sent\n"
                "- If the graph is meant to 'analyze data' → check if analysis results were produced\n"
                "- If the graph is meant to 'generate reports' → check if reports were generated\n"
                "- Technical completion ≠ goal achievement. Focus on whether the USER'S INTENDED OUTCOME was delivered\n\n"
|
||||
"IMPORTANT: Be HONEST about what actually happened:\n"
|
||||
"- If the input was invalid/nonsensical, say so directly\n"
|
||||
"- If the task failed, explain what went wrong in simple terms\n"
|
||||
"- If errors occurred, focus on what the user needs to know\n"
|
||||
"- Only claim success if the INTENDED PURPOSE was genuinely accomplished AND produced expected outputs\n"
|
||||
"- Don't sugar-coat failures or present them as helpful feedback\n"
|
||||
"- ESPECIALLY: If the graph's main purpose wasn't achieved, this is a failure regardless of 'completed' status\n\n"
|
||||
"Understanding Errors:\n"
|
||||
"- Node errors: Individual steps may fail but the overall task might still complete (e.g., one data source fails but others work)\n"
|
||||
"- Graph error (in overall_status.graph_error): This means the entire execution failed and nothing was accomplished\n"
|
||||
"- Missing outputs from critical blocks: Even if no errors, this means the task failed to produce expected results\n"
|
||||
"- Focus on whether the graph's intended purpose was fulfilled, not whether technical steps completed"
|
||||
),
|
||||
"content": system_prompt,
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": (
|
||||
f"A user ran '{graph_name}' to accomplish something. Based on this execution data, "
|
||||
f"provide both an activity summary and correctness assessment:\n\n"
|
||||
f"{json.dumps(execution_data, indent=2)}\n\n"
|
||||
"ANALYSIS CHECKLIST:\n"
|
||||
"1. READ graph_info.description FIRST - this tells you what the user intended to accomplish\n"
|
||||
"2. Check overall_status.graph_error - if present, the entire execution failed\n"
|
||||
"3. Look for nodes with 'Output', 'Post', 'Create', 'Send', 'Publish', 'Generate' in their block_name\n"
|
||||
"4. Check if these critical blocks have empty recent_outputs arrays - this indicates failure\n"
|
||||
"5. Look for AgentExecutorBlock (sub-agents) with no outputs - this suggests sub-task failures\n"
|
||||
"6. Count how many nodes produced outputs vs total nodes - low ratio suggests problems\n"
|
||||
"7. MOST IMPORTANT: Does the execution outcome match what graph_info.description promised?\n\n"
|
||||
"INTENTION-BASED EVALUATION:\n"
|
||||
"- If description mentions 'blog writing' → did it create blog content?\n"
|
||||
"- If description mentions 'email automation' → were emails actually sent?\n"
|
||||
"- If description mentions 'data analysis' → were analysis results produced?\n"
|
||||
"- If description mentions 'content generation' → was content actually generated?\n"
|
||||
"- If description mentions 'social media posting' → were posts actually made?\n"
|
||||
"- Match the outputs to the stated intention, not just technical completion\n\n"
|
||||
"PROVIDE:\n"
|
||||
"activity_status: 1-3 sentences about what the user accomplished, such as:\n"
|
||||
"- 'I analyzed your resume and provided detailed feedback for the IT industry.'\n"
|
||||
"- 'I couldn't complete the task because critical steps failed to produce any results.'\n"
|
||||
"- 'I failed to generate the content you requested due to missing API access.'\n"
|
||||
"- 'I extracted key information from your documents and organized it into a summary.'\n"
|
||||
"- 'The task failed because the blog post creation step didn't produce any output.'\n\n"
|
||||
"correctness_score: A float score from 0.0 to 1.0 based on how well the intended purpose was achieved:\n"
|
||||
"- 0.0-0.2: Failure (didn't meet requirements)\n"
|
||||
"- 0.2-0.4: Poor (major issues, minimal achievement)\n"
|
||||
"- 0.4-0.6: Partial Success (some objectives met with gaps)\n"
|
||||
"- 0.6-0.8: Mostly Successful (largely achieved with minor flaws)\n"
|
||||
"- 0.8-1.0: Success (fully met or exceeded requirements)\n\n"
|
||||
"BE CRITICAL: If the graph's intended purpose (from description) wasn't achieved, use a low score (0.0-0.4) even if status is 'completed'."
|
||||
),
|
||||
"content": user_prompt_content,
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
@@ -3,12 +3,18 @@ from contextlib import asynccontextmanager
|
||||
from typing import TYPE_CHECKING, Callable, Concatenate, ParamSpec, TypeVar, cast
|
||||
|
||||
from backend.data import db
|
||||
from backend.data.analytics import (
|
||||
get_accuracy_trends_and_alerts,
|
||||
get_marketplace_graphs_for_monitoring,
|
||||
)
|
||||
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
|
||||
from backend.data.execution import (
|
||||
create_graph_execution,
|
||||
get_block_error_stats,
|
||||
get_child_graph_executions,
|
||||
get_execution_kv_data,
|
||||
get_execution_outputs_by_node_exec_id,
|
||||
get_frequently_executed_graphs,
|
||||
get_graph_execution_meta,
|
||||
get_graph_executions,
|
||||
get_graph_executions_count,
|
||||
@@ -28,9 +34,15 @@ from backend.data.graph import (
|
||||
get_connected_output_nodes,
|
||||
get_graph,
|
||||
get_graph_metadata,
|
||||
get_graph_settings,
|
||||
get_node,
|
||||
validate_graph_execution_permissions,
|
||||
)
|
||||
from backend.data.human_review import (
|
||||
get_or_create_human_review,
|
||||
has_pending_reviews_for_graph_exec,
|
||||
update_review_processed_status,
|
||||
)
|
||||
from backend.data.notifications import (
|
||||
clear_all_user_notification_batches,
|
||||
create_or_add_to_user_notification_batch,
|
||||
@@ -136,15 +148,20 @@ class DatabaseManager(AppService):
|
||||
update_graph_execution_stats = _(update_graph_execution_stats)
|
||||
upsert_execution_input = _(upsert_execution_input)
|
||||
upsert_execution_output = _(upsert_execution_output)
|
||||
get_execution_outputs_by_node_exec_id = _(get_execution_outputs_by_node_exec_id)
|
||||
get_execution_kv_data = _(get_execution_kv_data)
|
||||
set_execution_kv_data = _(set_execution_kv_data)
|
||||
get_block_error_stats = _(get_block_error_stats)
|
||||
get_accuracy_trends_and_alerts = _(get_accuracy_trends_and_alerts)
|
||||
get_frequently_executed_graphs = _(get_frequently_executed_graphs)
|
||||
get_marketplace_graphs_for_monitoring = _(get_marketplace_graphs_for_monitoring)
|
||||
|
||||
# Graphs
|
||||
get_node = _(get_node)
|
||||
get_graph = _(get_graph)
|
||||
get_connected_output_nodes = _(get_connected_output_nodes)
|
||||
get_graph_metadata = _(get_graph_metadata)
|
||||
get_graph_settings = _(get_graph_settings)
|
||||
|
||||
# Credits
|
||||
spend_credits = _(_spend_credits, name="spend_credits")
|
||||
@@ -161,6 +178,11 @@ class DatabaseManager(AppService):
|
||||
get_user_email_verification = _(get_user_email_verification)
|
||||
get_user_notification_preference = _(get_user_notification_preference)
|
||||
|
||||
# Human In The Loop
|
||||
get_or_create_human_review = _(get_or_create_human_review)
|
||||
has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec)
|
||||
update_review_processed_status = _(update_review_processed_status)
|
||||
|
||||
# Notifications - async
|
||||
clear_all_user_notification_batches = _(clear_all_user_notification_batches)
|
||||
create_or_add_to_user_notification_batch = _(
|
||||
@@ -214,6 +236,13 @@ class DatabaseManagerClient(AppServiceClient):
|
||||
|
||||
# Block error monitoring
|
||||
get_block_error_stats = _(d.get_block_error_stats)
|
||||
# Execution accuracy monitoring
|
||||
get_accuracy_trends_and_alerts = _(d.get_accuracy_trends_and_alerts)
|
||||
get_frequently_executed_graphs = _(d.get_frequently_executed_graphs)
|
||||
get_marketplace_graphs_for_monitoring = _(d.get_marketplace_graphs_for_monitoring)
|
||||
|
||||
# Human In The Loop
|
||||
has_pending_reviews_for_graph_exec = _(d.has_pending_reviews_for_graph_exec)
|
||||
|
||||
# User Emails
|
||||
get_user_email_by_id = _(d.get_user_email_by_id)
|
||||
@@ -241,6 +270,7 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
get_latest_node_execution = d.get_latest_node_execution
|
||||
get_graph = d.get_graph
|
||||
get_graph_metadata = d.get_graph_metadata
|
||||
get_graph_settings = d.get_graph_settings
|
||||
get_graph_execution_meta = d.get_graph_execution_meta
|
||||
get_node = d.get_node
|
||||
get_node_execution = d.get_node_execution
|
||||
@@ -249,6 +279,7 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
get_user_integrations = d.get_user_integrations
|
||||
upsert_execution_input = d.upsert_execution_input
|
||||
upsert_execution_output = d.upsert_execution_output
|
||||
get_execution_outputs_by_node_exec_id = d.get_execution_outputs_by_node_exec_id
|
||||
update_graph_execution_stats = d.update_graph_execution_stats
|
||||
update_node_execution_status = d.update_node_execution_status
|
||||
update_node_execution_status_batch = d.update_node_execution_status_batch
|
||||
@@ -256,6 +287,10 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
get_execution_kv_data = d.get_execution_kv_data
|
||||
set_execution_kv_data = d.set_execution_kv_data
|
||||
|
||||
# Human In The Loop
|
||||
get_or_create_human_review = d.get_or_create_human_review
|
||||
update_review_processed_status = d.update_review_processed_status
|
||||
|
||||
# User Comms
|
||||
get_active_user_ids_in_timerange = d.get_active_user_ids_in_timerange
|
||||
get_user_email_by_id = d.get_user_email_by_id
|
||||
|
||||
@@ -29,6 +29,7 @@ from backend.data.block import (
|
||||
from backend.data.credit import UsageTransactionMetadata
|
||||
from backend.data.dynamic_fields import parse_execution_output
|
||||
from backend.data.execution import (
|
||||
ExecutionContext,
|
||||
ExecutionQueue,
|
||||
ExecutionStatus,
|
||||
GraphExecution,
|
||||
@@ -36,7 +37,6 @@ from backend.data.execution import (
|
||||
NodeExecutionEntry,
|
||||
NodeExecutionResult,
|
||||
NodesInputMasks,
|
||||
UserContext,
|
||||
)
|
||||
from backend.data.graph import Link, Node
|
||||
from backend.data.model import GraphExecutionStats, NodeExecutionStats
|
||||
@@ -133,9 +133,8 @@ def execute_graph(
|
||||
cluster_lock: ClusterLock,
|
||||
):
|
||||
"""Execute graph using thread-local ExecutionProcessor instance"""
|
||||
return _tls.processor.on_graph_execution(
|
||||
graph_exec_entry, cancel_event, cluster_lock
|
||||
)
|
||||
processor: ExecutionProcessor = _tls.processor
|
||||
return processor.on_graph_execution(graph_exec_entry, cancel_event, cluster_lock)
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
@@ -143,8 +142,8 @@ T = TypeVar("T")
|
||||
|
||||
async def execute_node(
|
||||
node: Node,
|
||||
creds_manager: IntegrationCredentialsManager,
|
||||
data: NodeExecutionEntry,
|
||||
execution_processor: "ExecutionProcessor",
|
||||
execution_stats: NodeExecutionStats | None = None,
|
||||
nodes_input_masks: Optional[NodesInputMasks] = None,
|
||||
) -> BlockOutput:
|
||||
@@ -164,9 +163,12 @@ async def execute_node(
|
||||
user_id = data.user_id
|
||||
graph_exec_id = data.graph_exec_id
|
||||
graph_id = data.graph_id
|
||||
graph_version = data.graph_version
|
||||
node_exec_id = data.node_exec_id
|
||||
node_id = data.node_id
|
||||
node_block = node.block
|
||||
execution_context = data.execution_context
|
||||
creds_manager = execution_processor.creds_manager
|
||||
|
||||
log_metadata = LogMetadata(
|
||||
logger=_logger,
|
||||
@@ -204,28 +206,66 @@ async def execute_node(
|
||||
# Inject extra execution arguments for the blocks via kwargs
|
||||
extra_exec_kwargs: dict = {
|
||||
"graph_id": graph_id,
|
||||
"graph_version": graph_version,
|
||||
"node_id": node_id,
|
||||
"graph_exec_id": graph_exec_id,
|
||||
"node_exec_id": node_exec_id,
|
||||
"user_id": user_id,
|
||||
"execution_context": execution_context,
|
||||
"execution_processor": execution_processor,
|
||||
}
|
||||
|
||||
# Add user context from NodeExecutionEntry
|
||||
extra_exec_kwargs["user_context"] = data.user_context
|
||||
|
||||
# Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
|
||||
# changes during execution. ⚠️ This means a set of credentials can only be used by
|
||||
# one (running) block at a time; simultaneous execution of blocks using same
|
||||
# credentials is not supported.
|
||||
creds_lock = None
|
||||
creds_locks: list[AsyncRedisLock] = []
|
||||
input_model = cast(type[BlockSchema], node_block.input_schema)
|
||||
|
||||
# Handle regular credentials fields
|
||||
for field_name, input_type in input_model.get_credentials_fields().items():
|
||||
credentials_meta = input_type(**input_data[field_name])
|
||||
credentials, creds_lock = await creds_manager.acquire(
|
||||
user_id, credentials_meta.id
|
||||
)
|
||||
credentials, lock = await creds_manager.acquire(user_id, credentials_meta.id)
|
||||
creds_locks.append(lock)
|
||||
extra_exec_kwargs[field_name] = credentials
|
||||
|
||||
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
|
||||
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
|
||||
field_name = info["field_name"]
|
||||
field_data = input_data.get(field_name)
|
||||
if field_data and isinstance(field_data, dict):
|
||||
# Check if _credentials_id key exists in the field data
|
||||
if "_credentials_id" in field_data:
|
||||
cred_id = field_data["_credentials_id"]
|
||||
if cred_id:
|
||||
# Credential ID provided - acquire credentials
|
||||
provider = info.get("config", {}).get(
|
||||
"provider", "external service"
|
||||
)
|
||||
file_name = field_data.get("name", "selected file")
|
||||
try:
|
||||
credentials, lock = await creds_manager.acquire(
|
||||
user_id, cred_id
|
||||
)
|
||||
creds_locks.append(lock)
|
||||
extra_exec_kwargs[kwarg_name] = credentials
|
||||
except ValueError:
|
||||
# Credential was deleted or doesn't exist
|
||||
raise ValueError(
|
||||
f"Authentication expired for '{file_name}' in field '{field_name}'. "
|
||||
f"The saved {provider.capitalize()} credentials no longer exist. "
|
||||
f"Please re-select the file to re-authenticate."
|
||||
)
|
||||
# else: _credentials_id is explicitly None, skip credentials (for chained data)
|
||||
else:
|
||||
# _credentials_id key missing entirely - this is an error
|
||||
provider = info.get("config", {}).get("provider", "external service")
|
||||
file_name = field_data.get("name", "selected file")
|
||||
raise ValueError(
|
||||
f"Authentication missing for '{file_name}' in field '{field_name}'. "
|
||||
f"Please re-select the file to authenticate with {provider.capitalize()}."
|
||||
)
|
||||
|
||||
output_size = 0
|
||||
|
||||
# sentry tracking nonsense to get user counts for blocks because isolation scopes don't work :(
|
||||
@@ -241,8 +281,8 @@ async def execute_node(
|
||||
scope.set_tag("node_id", node_id)
|
||||
scope.set_tag("block_name", node_block.name)
|
||||
scope.set_tag("block_id", node_block.id)
|
||||
for k, v in (data.user_context or UserContext(timezone="UTC")).model_dump().items():
|
||||
scope.set_tag(f"user_context.{k}", v)
|
||||
for k, v in execution_context.model_dump().items():
|
||||
scope.set_tag(f"execution_context.{k}", v)
|
||||
|
||||
try:
|
||||
async for output_name, output_data in node_block.execute(
|
||||
@@ -252,19 +292,24 @@ async def execute_node(
|
||||
output_size += len(json.dumps(output_data))
|
||||
log_metadata.debug("Node produced output", **{output_name: output_data})
|
||||
yield output_name, output_data
|
||||
except Exception:
|
||||
except Exception as ex:
|
||||
# Capture exception WITH context still set before restoring scope
|
||||
sentry_sdk.capture_exception(scope=scope)
|
||||
sentry_sdk.capture_exception(error=ex, scope=scope)
|
||||
sentry_sdk.flush() # Ensure it's sent before we restore scope
|
||||
# Re-raise to maintain normal error flow
|
||||
raise
|
||||
finally:
|
||||
# Ensure credentials are released even if execution fails
|
||||
if creds_lock and (await creds_lock.locked()) and (await creds_lock.owned()):
|
||||
try:
|
||||
await creds_lock.release()
|
||||
except Exception as e:
|
||||
log_metadata.error(f"Failed to release credentials lock: {e}")
|
||||
# Ensure all credentials are released even if execution fails
|
||||
for creds_lock in creds_locks:
|
||||
if (
|
||||
creds_lock
|
||||
and (await creds_lock.locked())
|
||||
and (await creds_lock.owned())
|
||||
):
|
||||
try:
|
||||
await creds_lock.release()
|
||||
except Exception as e:
|
||||
log_metadata.error(f"Failed to release credentials lock: {e}")
|
||||
|
||||
# Update execution stats
|
||||
if execution_stats is not None:
|
||||
@@ -284,9 +329,10 @@ async def _enqueue_next_nodes(
|
||||
user_id: str,
|
||||
graph_exec_id: str,
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
log_metadata: LogMetadata,
|
||||
nodes_input_masks: Optional[NodesInputMasks],
|
||||
user_context: UserContext,
|
||||
execution_context: ExecutionContext,
|
||||
) -> list[NodeExecutionEntry]:
|
||||
async def add_enqueued_execution(
|
||||
node_exec_id: str, node_id: str, block_id: str, data: BlockInput
|
||||
@@ -301,11 +347,12 @@ async def _enqueue_next_nodes(
|
||||
user_id=user_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
graph_id=graph_id,
|
||||
graph_version=graph_version,
|
||||
node_exec_id=node_exec_id,
|
||||
node_id=node_id,
|
||||
block_id=block_id,
|
||||
inputs=data,
|
||||
user_context=user_context,
|
||||
execution_context=execution_context,
|
||||
)
|
||||
|
||||
async def register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:
|
||||
@@ -322,7 +369,9 @@ async def _enqueue_next_nodes(
|
||||
next_node_id = node_link.sink_id
|
||||
|
||||
output_name, _ = output
|
||||
next_data = parse_execution_output(output, next_output_name)
|
||||
next_data = parse_execution_output(
|
||||
output, next_output_name, next_node_id, next_input_name
|
||||
)
|
||||
if next_data is None and output_name != next_output_name:
|
||||
return enqueued_executions
|
||||
next_node = await db_client.get_node(next_node_id)
|
||||
@@ -332,17 +381,14 @@ async def _enqueue_next_nodes(
|
||||
# Or the same input to be consumed multiple times.
|
||||
async with synchronized(f"upsert_input-{next_node_id}-{graph_exec_id}"):
|
||||
# Add output data to the earliest incomplete execution, or create a new one.
|
||||
next_node_exec_id, next_node_input = await db_client.upsert_execution_input(
|
||||
next_node_exec, next_node_input = await db_client.upsert_execution_input(
|
||||
node_id=next_node_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
input_name=next_input_name,
|
||||
input_data=next_data,
|
||||
)
|
||||
await async_update_node_execution_status(
|
||||
db_client=db_client,
|
||||
exec_id=next_node_exec_id,
|
||||
status=ExecutionStatus.INCOMPLETE,
|
||||
)
|
||||
next_node_exec_id = next_node_exec.node_exec_id
|
||||
await send_async_execution_update(next_node_exec)
|
||||
|
||||
# Complete missing static input pins data using the last execution input.
|
||||
static_link_names = {
|
||||
@@ -563,8 +609,8 @@ class ExecutionProcessor:
|
||||
|
||||
async for output_name, output_data in execute_node(
|
||||
node=node,
|
||||
creds_manager=self.creds_manager,
|
||||
data=node_exec,
|
||||
execution_processor=self,
|
||||
execution_stats=stats,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
):
|
||||
@@ -658,6 +704,16 @@ class ExecutionProcessor:
|
||||
log_metadata.info(
|
||||
f"⚙️ Graph execution #{graph_exec.graph_exec_id} is already running, continuing where it left off."
|
||||
)
|
||||
elif exec_meta.status == ExecutionStatus.REVIEW:
|
||||
exec_meta.status = ExecutionStatus.RUNNING
|
||||
log_metadata.info(
|
||||
f"⚙️ Graph execution #{graph_exec.graph_exec_id} was waiting for review, resuming execution."
|
||||
)
|
||||
update_graph_execution_state(
|
||||
db_client=db_client,
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
status=ExecutionStatus.RUNNING,
|
||||
)
|
||||
elif exec_meta.status == ExecutionStatus.FAILED:
|
||||
exec_meta.status = ExecutionStatus.RUNNING
|
||||
log_metadata.info(
|
||||
@@ -695,19 +751,21 @@ class ExecutionProcessor:
|
||||
raise status
|
||||
exec_meta.status = status
|
||||
|
||||
# Activity status handling
|
||||
activity_response = asyncio.run_coroutine_threadsafe(
|
||||
generate_activity_status_for_execution(
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
graph_id=graph_exec.graph_id,
|
||||
graph_version=graph_exec.graph_version,
|
||||
execution_stats=exec_stats,
|
||||
db_client=get_db_async_client(),
|
||||
user_id=graph_exec.user_id,
|
||||
execution_status=status,
|
||||
),
|
||||
self.node_execution_loop,
|
||||
).result(timeout=60.0)
|
||||
if status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
|
||||
activity_response = asyncio.run_coroutine_threadsafe(
|
||||
generate_activity_status_for_execution(
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
graph_id=graph_exec.graph_id,
|
||||
graph_version=graph_exec.graph_version,
|
||||
execution_stats=exec_stats,
|
||||
db_client=get_db_async_client(),
|
||||
user_id=graph_exec.user_id,
|
||||
execution_status=status,
|
||||
),
|
||||
self.node_execution_loop,
|
||||
).result(timeout=60.0)
|
||||
else:
|
||||
activity_response = None
|
||||
if activity_response is not None:
|
||||
exec_stats.activity_status = activity_response["activity_status"]
|
||||
exec_stats.correctness_score = activity_response["correctness_score"]
|
||||
@@ -803,12 +861,17 @@ class ExecutionProcessor:
|
||||
execution_stats_lock = threading.Lock()
|
||||
|
||||
# State holders ----------------------------------------------------
|
||||
running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
|
||||
self.running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
|
||||
NodeExecutionProgress
|
||||
)
|
||||
running_node_evaluation: dict[str, Future] = {}
|
||||
self.running_node_evaluation: dict[str, Future] = {}
|
||||
self.execution_stats = execution_stats
|
||||
self.execution_stats_lock = execution_stats_lock
|
||||
execution_queue = ExecutionQueue[NodeExecutionEntry]()
|
||||
|
||||
running_node_execution = self.running_node_execution
|
||||
running_node_evaluation = self.running_node_evaluation
|
||||
|
||||
try:
|
||||
if db_client.get_credits(graph_exec.user_id) <= 0:
|
||||
raise InsufficientBalanceError(
|
||||
@@ -843,14 +906,18 @@ class ExecutionProcessor:
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.TERMINATED,
|
||||
ExecutionStatus.REVIEW,
|
||||
],
|
||||
):
|
||||
node_entry = node_exec.to_node_execution_entry(graph_exec.user_context)
|
||||
node_entry = node_exec.to_node_execution_entry(
|
||||
graph_exec.execution_context
|
||||
)
|
||||
execution_queue.add(node_entry)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Main dispatch / polling loop -----------------------------
|
||||
# ------------------------------------------------------------
|
||||
|
||||
while not execution_queue.empty():
|
||||
if cancel.is_set():
|
||||
break
|
||||
@@ -1004,7 +1071,12 @@ class ExecutionProcessor:
|
||||
elif error is not None:
|
||||
execution_status = ExecutionStatus.FAILED
|
||||
else:
|
||||
execution_status = ExecutionStatus.COMPLETED
|
||||
if db_client.has_pending_reviews_for_graph_exec(
|
||||
graph_exec.graph_exec_id
|
||||
):
|
||||
execution_status = ExecutionStatus.REVIEW
|
||||
else:
|
||||
execution_status = ExecutionStatus.COMPLETED
|
||||
|
||||
if error:
|
||||
execution_stats.error = str(error) or type(error).__name__
|
||||
@@ -1140,9 +1212,10 @@ class ExecutionProcessor:
|
||||
user_id=graph_exec.user_id,
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
graph_id=graph_exec.graph_id,
|
||||
graph_version=graph_exec.graph_version,
|
||||
log_metadata=log_metadata,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
user_context=graph_exec.user_context,
|
||||
execution_context=graph_exec.execution_context,
|
||||
):
|
||||
execution_queue.add(next_execution)
|
||||
|
||||
@@ -1532,36 +1605,32 @@ class ExecutionManager(AppProcess):
|
||||
graph_exec_id = graph_exec_entry.graph_exec_id
|
||||
user_id = graph_exec_entry.user_id
|
||||
graph_id = graph_exec_entry.graph_id
|
||||
parent_graph_exec_id = graph_exec_entry.parent_graph_exec_id
|
||||
root_exec_id = graph_exec_entry.execution_context.root_execution_id
|
||||
parent_exec_id = graph_exec_entry.execution_context.parent_execution_id
|
||||
|
||||
logger.info(
|
||||
f"[{self.service_name}] Received RUN for graph_exec_id={graph_exec_id}, user_id={user_id}, executor_id={self.executor_id}"
|
||||
+ (f", parent={parent_graph_exec_id}" if parent_graph_exec_id else "")
|
||||
+ (f", root={root_exec_id}" if root_exec_id else "")
|
||||
+ (f", parent={parent_exec_id}" if parent_exec_id else "")
|
||||
)
|
||||
|
||||
# Check if parent execution is already terminated (prevents orphaned child executions)
|
||||
if parent_graph_exec_id:
|
||||
try:
|
||||
parent_exec = get_db_client().get_graph_execution_meta(
|
||||
execution_id=parent_graph_exec_id,
|
||||
user_id=user_id,
|
||||
# Check if root execution is already terminated (prevents orphaned child executions)
|
||||
if root_exec_id and root_exec_id != graph_exec_id:
|
||||
parent_exec = get_db_client().get_graph_execution_meta(
|
||||
execution_id=root_exec_id,
|
||||
user_id=user_id,
|
||||
)
|
||||
if parent_exec and parent_exec.status == ExecutionStatus.TERMINATED:
|
||||
logger.info(
|
||||
f"[{self.service_name}] Skipping execution {graph_exec_id} - parent {root_exec_id} is TERMINATED"
|
||||
)
|
||||
if parent_exec and parent_exec.status == ExecutionStatus.TERMINATED:
|
||||
logger.info(
|
||||
f"[{self.service_name}] Skipping execution {graph_exec_id} - parent {parent_graph_exec_id} is TERMINATED"
|
||||
)
|
||||
# Mark this child as terminated since parent was stopped
|
||||
get_db_client().update_graph_execution_stats(
|
||||
graph_exec_id=graph_exec_id,
|
||||
status=ExecutionStatus.TERMINATED,
|
||||
)
|
||||
_ack_message(reject=False, requeue=False)
|
||||
return
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"[{self.service_name}] Could not check parent status for {graph_exec_id}: {e}"
|
||||
# Mark this child as terminated since parent was stopped
|
||||
get_db_client().update_graph_execution_stats(
|
||||
graph_exec_id=graph_exec_id,
|
||||
status=ExecutionStatus.TERMINATED,
|
||||
)
|
||||
# Continue execution if parent check fails (don't block on errors)
|
||||
_ack_message(reject=False, requeue=False)
|
||||
return
|
||||
|
||||
# Check user rate limit before processing
|
||||
try:
|
||||
|
||||
@@ -2,6 +2,7 @@ import asyncio
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import uuid
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
|
||||
@@ -25,18 +26,22 @@ from sqlalchemy import MetaData, create_engine
|
||||
from backend.data.block import BlockInput
|
||||
from backend.data.execution import GraphExecutionWithNodes
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.data.onboarding import increment_runs
|
||||
from backend.executor import utils as execution_utils
|
||||
from backend.monitoring import (
|
||||
NotificationJobArgs,
|
||||
process_existing_batches,
|
||||
process_weekly_summary,
|
||||
report_block_error_rates,
|
||||
report_execution_accuracy_alerts,
|
||||
report_late_executions,
|
||||
)
|
||||
from backend.util.clients import get_scheduler_client
|
||||
from backend.util.cloud_storage import cleanup_expired_files_async
|
||||
from backend.util.exceptions import (
|
||||
GraphNotFoundError,
|
||||
GraphNotInLibraryError,
|
||||
GraphValidationError,
|
||||
NotAuthorizedError,
|
||||
NotFoundError,
|
||||
)
|
||||
@@ -150,6 +155,7 @@ async def _execute_graph(**kwargs):
|
||||
inputs=args.input_data,
|
||||
graph_credentials_inputs=args.input_credentials,
|
||||
)
|
||||
await increment_runs(args.user_id)
|
||||
elapsed = asyncio.get_event_loop().time() - start_time
|
||||
logger.info(
|
||||
f"Graph execution started with ID {graph_exec.id} for graph {args.graph_id} "
|
||||
@@ -160,14 +166,12 @@ async def _execute_graph(**kwargs):
|
||||
f"Graph execution {graph_exec.id} took {elapsed:.2f}s to create/publish - "
|
||||
f"this is unusually slow and may indicate resource contention"
|
||||
)
|
||||
except GraphNotFoundError as e:
|
||||
await _handle_graph_not_available(e, args, start_time)
|
||||
except GraphNotInLibraryError as e:
|
||||
elapsed = asyncio.get_event_loop().time() - start_time
|
||||
logger.warning(
|
||||
f"Scheduled execution blocked for deleted/archived graph {args.graph_id} "
|
||||
f"(user {args.user_id}) after {elapsed:.2f}s: {e}"
|
||||
)
|
||||
# Clean up orphaned schedules for this graph
|
||||
await _cleanup_orphaned_schedules_for_graph(args.graph_id, args.user_id)
|
||||
await _handle_graph_not_available(e, args, start_time)
|
||||
except GraphValidationError:
|
||||
await _handle_graph_validation_error(args)
|
||||
except Exception as e:
|
||||
elapsed = asyncio.get_event_loop().time() - start_time
|
||||
logger.error(
|
||||
@@ -176,6 +180,34 @@ async def _execute_graph(**kwargs):
|
||||
)
|
||||
|
||||
|
||||
async def _handle_graph_validation_error(args: "GraphExecutionJobArgs") -> None:
|
||||
logger.error(
|
||||
f"Scheduled Graph {args.graph_id} failed validation. Unscheduling graph"
|
||||
)
|
||||
if args.schedule_id:
|
||||
scheduler_client = get_scheduler_client()
|
||||
await scheduler_client.delete_schedule(
|
||||
schedule_id=args.schedule_id,
|
||||
user_id=args.user_id,
|
||||
)
|
||||
else:
|
||||
logger.error(
|
||||
f"Unable to unschedule graph: {args.graph_id} as this is an old job with no associated schedule_id please remove manually"
|
||||
)
|
||||
|
||||
|
||||
async def _handle_graph_not_available(
|
||||
e: Exception, args: "GraphExecutionJobArgs", start_time: float
|
||||
) -> None:
|
||||
elapsed = asyncio.get_event_loop().time() - start_time
|
||||
logger.warning(
|
||||
f"Scheduled execution blocked for deleted/archived graph {args.graph_id} "
|
||||
f"(user {args.user_id}) after {elapsed:.2f}s: {e}"
|
||||
)
|
||||
# Clean up orphaned schedules for this graph
|
||||
await _cleanup_orphaned_schedules_for_graph(args.graph_id, args.user_id)
|
||||
|
||||
|
||||
async def _cleanup_orphaned_schedules_for_graph(graph_id: str, user_id: str) -> None:
|
||||
"""
|
||||
Clean up orphaned schedules for a specific graph when execution fails with GraphNotAccessibleError.
|
||||
@@ -210,6 +242,11 @@ def cleanup_expired_files():
|
||||
run_async(cleanup_expired_files_async())
|
||||
|
||||
|
||||
def execution_accuracy_alerts():
|
||||
"""Check execution accuracy and send alerts if drops are detected."""
|
||||
return report_execution_accuracy_alerts()
|
||||
|
||||
|
||||
# Monitoring functions are now imported from monitoring module
|
||||
|
||||
|
||||
@@ -220,9 +257,11 @@ class Jobstores(Enum):
|
||||
|
||||
|
||||
class GraphExecutionJobArgs(BaseModel):
|
||||
schedule_id: str | None = None
|
||||
user_id: str
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
agent_name: str | None = None
|
||||
cron: str
|
||||
input_data: BlockInput
|
||||
input_credentials: dict[str, CredentialsMetaInput] = Field(default_factory=dict)
|
||||
@@ -407,6 +446,17 @@ class Scheduler(AppService):
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
)
|
||||
|
||||
# Execution Accuracy Monitoring - configurable interval
|
||||
self.scheduler.add_job(
|
||||
execution_accuracy_alerts,
|
||||
id="report_execution_accuracy_alerts",
|
||||
trigger="interval",
|
||||
replace_existing=True,
|
||||
seconds=config.execution_accuracy_check_interval_hours
|
||||
* 3600, # Convert hours to seconds
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
)
|
||||
|
||||
self.scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
|
||||
self.scheduler.add_listener(job_missed_listener, EVENT_JOB_MISSED)
|
||||
self.scheduler.add_listener(job_max_instances_listener, EVENT_JOB_MAX_INSTANCES)
|
||||
@@ -468,11 +518,14 @@ class Scheduler(AppService):
|
||||
logger.info(
|
||||
f"Scheduling job for user {user_id} with timezone {user_timezone} (cron: {cron})"
|
||||
)
|
||||
schedule_id = str(uuid.uuid4())
|
||||
|
||||
job_args = GraphExecutionJobArgs(
|
||||
schedule_id=schedule_id,
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
graph_version=graph_version,
|
||||
agent_name=name,
|
||||
cron=cron,
|
||||
input_data=input_data,
|
||||
input_credentials=input_credentials,
|
||||
@@ -484,6 +537,7 @@ class Scheduler(AppService):
|
||||
trigger=CronTrigger.from_crontab(cron, timezone=user_timezone),
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
replace_existing=True,
|
||||
id=schedule_id,
|
||||
)
|
||||
logger.info(
|
||||
f"Added job {job.id} with cron schedule '{cron}' in timezone {user_timezone}, input data: {input_data}"
|
||||
@@ -550,6 +604,11 @@ class Scheduler(AppService):
|
||||
"""Manually trigger cleanup of expired cloud storage files."""
|
||||
return cleanup_expired_files()
|
||||
|
||||
@expose
|
||||
def execute_report_execution_accuracy_alerts(self):
|
||||
"""Manually trigger execution accuracy alert checking."""
|
||||
return execution_accuracy_alerts()
|
||||
|
||||
|
||||
class SchedulerClient(AppServiceClient):
|
||||
@classmethod
|
||||
|
||||
@@ -10,6 +10,7 @@ from pydantic import BaseModel, JsonValue, ValidationError
|
||||
|
||||
from backend.data import execution as execution_db
|
||||
from backend.data import graph as graph_db
|
||||
from backend.data import user as user_db
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
BlockCostType,
|
||||
@@ -24,55 +25,32 @@ from backend.data.db import prisma
|
||||
# Import dynamic field utilities from centralized location
|
||||
from backend.data.dynamic_fields import merge_execution_input
|
||||
from backend.data.execution import (
|
||||
ExecutionContext,
|
||||
ExecutionStatus,
|
||||
GraphExecutionMeta,
|
||||
GraphExecutionStats,
|
||||
GraphExecutionWithNodes,
|
||||
NodesInputMasks,
|
||||
UserContext,
|
||||
get_graph_execution,
|
||||
)
|
||||
from backend.data.graph import GraphModel, Node
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.data.model import USER_TIMEZONE_NOT_SET, CredentialsMetaInput
|
||||
from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig
|
||||
from backend.data.user import get_user_by_id
|
||||
from backend.util.cache import cached
|
||||
from backend.util.clients import (
|
||||
get_async_execution_event_bus,
|
||||
get_async_execution_queue,
|
||||
get_database_manager_async_client,
|
||||
get_integration_credentials_store,
|
||||
)
|
||||
from backend.util.exceptions import GraphValidationError, NotFoundError
|
||||
from backend.util.exceptions import (
|
||||
GraphNotFoundError,
|
||||
GraphValidationError,
|
||||
NotFoundError,
|
||||
)
|
||||
from backend.util.logging import TruncatedLogger, is_structured_logging_enabled
|
||||
from backend.util.settings import Config
|
||||
from backend.util.type import convert
|
||||
|
||||
|
||||
@cached(maxsize=1000, ttl_seconds=3600)
|
||||
async def get_user_context(user_id: str) -> UserContext:
|
||||
"""
|
||||
Get UserContext for a user, always returns a valid context with timezone.
|
||||
Defaults to UTC if user has no timezone set.
|
||||
"""
|
||||
user_context = UserContext(timezone="UTC") # Default to UTC
|
||||
try:
|
||||
if prisma.is_connected():
|
||||
user = await get_user_by_id(user_id)
|
||||
else:
|
||||
user = await get_database_manager_async_client().get_user_by_id(user_id)
|
||||
|
||||
if user and user.timezone and user.timezone != "not-set":
|
||||
user_context.timezone = user.timezone
|
||||
logger.debug(f"Retrieved user context: timezone={user.timezone}")
|
||||
else:
|
||||
logger.debug("User has no timezone set, using UTC")
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not fetch user timezone: {e}")
|
||||
# Continue with UTC as default
|
||||
|
||||
return user_context
|
||||
|
||||
|
||||
config = Config()
|
||||
logger = TruncatedLogger(logging.getLogger(__name__), prefix="[GraphExecutorUtil]")
|
||||
|
||||
@@ -490,7 +468,6 @@ async def validate_and_construct_node_execution_input(
|
||||
graph_version: The version of the graph to use.
|
||||
graph_credentials_inputs: Credentials inputs to use.
|
||||
nodes_input_masks: Node inputs to use.
|
||||
is_sub_graph: Whether this is a sub-graph execution.
|
||||
|
||||
Returns:
|
||||
GraphModel: Full graph object for the given `graph_id`.
|
||||
@@ -516,7 +493,7 @@ async def validate_and_construct_node_execution_input(
|
||||
skip_access_check=True,
|
||||
)
|
||||
if not graph:
|
||||
raise NotFoundError(f"Graph #{graph_id} not found.")
|
||||
raise GraphNotFoundError(f"Graph #{graph_id} not found.")
|
||||
|
||||
# Validate that the user has permission to execute this graph
|
||||
# This checks both library membership and execution permissions,
|
||||
@@ -758,8 +735,8 @@ async def add_graph_execution(
|
||||
graph_version: Optional[int] = None,
|
||||
graph_credentials_inputs: Optional[Mapping[str, CredentialsMetaInput]] = None,
|
||||
nodes_input_masks: Optional[NodesInputMasks] = None,
|
||||
parent_graph_exec_id: Optional[str] = None,
|
||||
is_sub_graph: bool = False,
|
||||
execution_context: Optional[ExecutionContext] = None,
|
||||
graph_exec_id: Optional[str] = None,
|
||||
) -> GraphExecutionWithNodes:
|
||||
"""
|
||||
Adds a graph execution to the queue and returns the execution entry.
|
||||
@@ -774,33 +751,54 @@ async def add_graph_execution(
|
||||
Keys should map to the keys generated by `GraphModel.aggregate_credentials_inputs`.
|
||||
nodes_input_masks: Node inputs to use in the execution.
|
||||
parent_graph_exec_id: The ID of the parent graph execution (for nested executions).
|
||||
is_sub_graph: Whether this is a sub-graph execution.
|
||||
graph_exec_id: If provided, resume this existing execution instead of creating a new one.
|
||||
Returns:
|
||||
GraphExecutionEntry: The entry for the graph execution.
|
||||
Raises:
|
||||
ValueError: If the graph is not found or if there are validation errors.
|
||||
NotFoundError: If graph_exec_id is provided but execution is not found.
|
||||
"""
|
||||
if prisma.is_connected():
|
||||
edb = execution_db
|
||||
udb = user_db
|
||||
gdb = graph_db
|
||||
else:
|
||||
edb = get_database_manager_async_client()
|
||||
edb = udb = gdb = get_database_manager_async_client()
|
||||
|
||||
graph, starting_nodes_input, compiled_nodes_input_masks = (
|
||||
await validate_and_construct_node_execution_input(
|
||||
graph_id=graph_id,
|
||||
# Get or create the graph execution
|
||||
if graph_exec_id:
|
||||
# Resume existing execution
|
||||
graph_exec = await get_graph_execution(
|
||||
user_id=user_id,
|
||||
graph_inputs=inputs or {},
|
||||
graph_version=graph_version,
|
||||
graph_credentials_inputs=graph_credentials_inputs,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
is_sub_graph=is_sub_graph,
|
||||
execution_id=graph_exec_id,
|
||||
include_node_executions=True,
|
||||
)
|
||||
|
||||
if not graph_exec:
|
||||
raise NotFoundError(f"Graph execution #{graph_exec_id} not found.")
|
||||
|
||||
# Use existing execution's compiled input masks
|
||||
compiled_nodes_input_masks = graph_exec.nodes_input_masks or {}
|
||||
|
||||
logger.info(f"Resuming graph execution #{graph_exec.id} for graph #{graph_id}")
|
||||
else:
|
||||
parent_exec_id = (
|
||||
execution_context.parent_execution_id if execution_context else None
|
||||
)
|
||||
|
||||
# Create new execution
|
||||
graph, starting_nodes_input, compiled_nodes_input_masks = (
|
||||
await validate_and_construct_node_execution_input(
|
||||
graph_id=graph_id,
|
||||
user_id=user_id,
|
||||
graph_inputs=inputs or {},
|
||||
graph_version=graph_version,
|
||||
graph_credentials_inputs=graph_credentials_inputs,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
is_sub_graph=parent_exec_id is not None,
|
||||
)
|
||||
)
|
||||
)
|
||||
graph_exec = None
|
||||
|
||||
try:
|
||||
# Sanity check: running add_graph_execution with the properties of
|
||||
# the graph_exec created here should create the same execution again.
|
||||
graph_exec = await edb.create_graph_execution(
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
@@ -810,20 +808,38 @@ async def add_graph_execution(
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
starting_nodes_input=starting_nodes_input,
|
||||
preset_id=preset_id,
|
||||
parent_graph_exec_id=parent_graph_exec_id,
|
||||
parent_graph_exec_id=parent_exec_id,
|
||||
)
|
||||
|
||||
graph_exec_entry = graph_exec.to_graph_execution_entry(
|
||||
user_context=await get_user_context(user_id),
|
||||
compiled_nodes_input_masks=compiled_nodes_input_masks,
|
||||
parent_graph_exec_id=parent_graph_exec_id,
|
||||
)
|
||||
logger.info(
|
||||
f"Created graph execution #{graph_exec.id} for graph "
|
||||
f"#{graph_id} with {len(starting_nodes_input)} starting nodes. "
|
||||
f"Now publishing to execution queue."
|
||||
f"#{graph_id} with {len(starting_nodes_input)} starting nodes"
|
||||
)
|
||||
|
||||
# Generate execution context if it's not provided
|
||||
if execution_context is None:
|
||||
user = await udb.get_user_by_id(user_id)
|
||||
settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id)
|
||||
|
||||
execution_context = ExecutionContext(
|
||||
safe_mode=(
|
||||
settings.human_in_the_loop_safe_mode
|
||||
if settings.human_in_the_loop_safe_mode is not None
|
||||
else True
|
||||
),
|
||||
user_timezone=(
|
||||
user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
|
||||
),
|
||||
root_execution_id=graph_exec.id,
|
||||
)
|
||||
|
||||
try:
|
||||
graph_exec_entry = graph_exec.to_graph_execution_entry(
|
||||
compiled_nodes_input_masks=compiled_nodes_input_masks,
|
||||
execution_context=execution_context,
|
||||
)
|
||||
logger.info(f"Publishing execution {graph_exec.id} to execution queue")
|
||||
|
||||
exec_queue = await get_async_execution_queue()
|
||||
await exec_queue.publish_message(
|
||||
routing_key=GRAPH_EXECUTION_ROUTING_KEY,
|
||||
|
||||
@@ -111,6 +111,35 @@ def test_parse_execution_output():
|
||||
parse_execution_output(output, "result_@_attr_$_0_#_key") is None
|
||||
) # Should fail at @_attr
|
||||
|
||||
# Test case 7: Tool pin routing with matching node ID and pin name
|
||||
output = ("tools_^_node123_~_query", "search term")
|
||||
assert parse_execution_output(output, "tools", "node123", "query") == "search term"
|
||||
|
||||
# Test case 8: Tool pin routing with node ID mismatch
|
||||
output = ("tools_^_node123_~_query", "search term")
|
||||
assert parse_execution_output(output, "tools", "node456", "query") is None
|
||||
|
||||
# Test case 9: Tool pin routing with pin name mismatch
|
||||
output = ("tools_^_node123_~_query", "search term")
|
||||
assert parse_execution_output(output, "tools", "node123", "different_pin") is None
|
||||
|
||||
# Test case 10: Tool pin routing with complex field names
|
||||
output = ("tools_^_node789_~_nested_field", {"key": "value"})
|
||||
result = parse_execution_output(output, "tools", "node789", "nested_field")
|
||||
assert result == {"key": "value"}
|
||||
|
||||
# Test case 11: Tool pin routing missing required parameters should raise error
|
||||
output = ("tools_^_node123_~_query", "search term")
|
||||
try:
|
||||
parse_execution_output(output, "tools", "node123") # Missing sink_pin_name
|
||||
assert False, "Should have raised ValueError"
|
||||
except ValueError as e:
|
||||
assert "must be provided for tool pin routing" in str(e)
|
||||
|
||||
# Test case 12: Non-tool pin with similar pattern should use normal logic
|
||||
output = ("tools_^_node123_~_query", "search term")
|
||||
assert parse_execution_output(output, "different_name", "node123", "query") is None
|
||||
|
||||
|
||||
def test_merge_execution_input():
|
||||
# Test case for basic list extraction
|
||||
@@ -319,9 +348,6 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
|
||||
mock_graph_exec.node_executions = [] # Add this to avoid AttributeError
|
||||
mock_graph_exec.to_graph_execution_entry.return_value = mocker.MagicMock()
|
||||
|
||||
# Mock user context
|
||||
mock_user_context = {"user_id": user_id, "context": "test_context"}
|
||||
|
||||
# Mock the queue and event bus
|
||||
mock_queue = mocker.AsyncMock()
|
||||
mock_event_bus = mocker.MagicMock()
|
||||
@@ -333,7 +359,8 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
|
||||
)
|
||||
mock_edb = mocker.patch("backend.executor.utils.execution_db")
|
||||
mock_prisma = mocker.patch("backend.executor.utils.prisma")
|
||||
mock_get_user_context = mocker.patch("backend.executor.utils.get_user_context")
|
||||
mock_udb = mocker.patch("backend.executor.utils.user_db")
|
||||
mock_gdb = mocker.patch("backend.executor.utils.graph_db")
|
||||
mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue")
|
||||
mock_get_event_bus = mocker.patch(
|
||||
"backend.executor.utils.get_async_execution_event_bus"
|
||||
@@ -351,7 +378,14 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
|
||||
return_value=mock_graph_exec
|
||||
)
|
||||
mock_edb.update_node_execution_status_batch = mocker.AsyncMock()
|
||||
mock_get_user_context.return_value = mock_user_context
|
||||
# Mock user and settings data
|
||||
mock_user = mocker.MagicMock()
|
||||
mock_user.timezone = "UTC"
|
||||
mock_settings = mocker.MagicMock()
|
||||
mock_settings.human_in_the_loop_safe_mode = True
|
||||
|
||||
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
|
||||
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
|
||||
mock_get_queue.return_value = mock_queue
|
||||
mock_get_event_bus.return_value = mock_event_bus
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ from backend.data.model import (
|
||||
OAuth2Credentials,
|
||||
OAuthState,
|
||||
UserIntegrations,
|
||||
UserPasswordCredentials,
|
||||
)
|
||||
from backend.data.redis_client import get_redis_async
|
||||
from backend.util.settings import Settings
|
||||
@@ -207,6 +208,14 @@ v0_credentials = APIKeyCredentials(
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
webshare_proxy_credentials = UserPasswordCredentials(
|
||||
id="a5b3c7d9-2e4f-4a6b-8c1d-9e0f1a2b3c4d",
|
||||
provider="webshare_proxy",
|
||||
username=SecretStr(settings.secrets.webshare_proxy_username),
|
||||
password=SecretStr(settings.secrets.webshare_proxy_password),
|
||||
title="Use Credits for Webshare Proxy",
|
||||
)
|
||||
|
||||
DEFAULT_CREDENTIALS = [
|
||||
ollama_credentials,
|
||||
revid_credentials,
|
||||
@@ -233,6 +242,7 @@ DEFAULT_CREDENTIALS = [
|
||||
google_maps_credentials,
|
||||
llama_api_credentials,
|
||||
v0_credentials,
|
||||
webshare_proxy_credentials,
|
||||
]
|
||||
|
||||
|
||||
@@ -321,6 +331,11 @@ class IntegrationCredentialsStore:
|
||||
all_credentials.append(zerobounce_credentials)
|
||||
if settings.secrets.google_maps_api_key:
|
||||
all_credentials.append(google_maps_credentials)
|
||||
if (
|
||||
settings.secrets.webshare_proxy_username
|
||||
and settings.secrets.webshare_proxy_password
|
||||
):
|
||||
all_credentials.append(webshare_proxy_credentials)
|
||||
return all_credentials
|
||||
|
||||
async def get_creds_by_id(
|
||||
@@ -399,7 +414,15 @@ class IntegrationCredentialsStore:
|
||||
# ===================== OAUTH STATES ===================== #
|
||||
|
||||
async def store_state_token(
|
||||
self, user_id: str, provider: str, scopes: list[str], use_pkce: bool = False
|
||||
self,
|
||||
user_id: str,
|
||||
provider: str,
|
||||
scopes: list[str],
|
||||
use_pkce: bool = False,
|
||||
# New parameters for external API OAuth flows
|
||||
callback_url: Optional[str] = None,
|
||||
state_metadata: Optional[dict] = None,
|
||||
initiated_by_api_key_id: Optional[str] = None,
|
||||
) -> tuple[str, str]:
|
||||
token = secrets.token_urlsafe(32)
|
||||
expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)
|
||||
@@ -412,6 +435,10 @@ class IntegrationCredentialsStore:
|
||||
code_verifier=code_verifier,
|
||||
expires_at=int(expires_at.timestamp()),
|
||||
scopes=scopes,
|
||||
# External API OAuth flow fields
|
||||
callback_url=callback_url,
|
||||
state_metadata=state_metadata or {},
|
||||
initiated_by_api_key_id=initiated_by_api_key_id,
|
||||
)
|
||||
|
||||
async with self.edit_user_integrations(user_id) as user_integrations:
|
||||
|
||||
@@ -49,6 +49,7 @@ class ProviderName(str, Enum):
|
||||
TODOIST = "todoist"
|
||||
UNREAL_SPEECH = "unreal_speech"
|
||||
V0 = "v0"
|
||||
WEBSHARE_PROXY = "webshare_proxy"
|
||||
ZEROBOUNCE = "zerobounce"
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -18,7 +18,9 @@ class ManualWebhookManagerBase(BaseWebhooksManager[WT]):
|
||||
ingress_url: str,
|
||||
secret: str,
|
||||
) -> tuple[str, dict]:
|
||||
print(ingress_url) # FIXME: pass URL to user in front end
|
||||
# TODO: pass ingress_url to user in frontend
|
||||
# See: https://github.com/Significant-Gravitas/AutoGPT/issues/8537
|
||||
logger.debug(f"Manual webhook registered with ingress URL: {ingress_url}")
|
||||
|
||||
return "", {}
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Monitoring module for platform health and alerting."""
|
||||
|
||||
from .accuracy_monitor import AccuracyMonitor, report_execution_accuracy_alerts
|
||||
from .block_error_monitor import BlockErrorMonitor, report_block_error_rates
|
||||
from .late_execution_monitor import (
|
||||
LateExecutionException,
|
||||
@@ -13,10 +14,12 @@ from .notification_monitor import (
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"AccuracyMonitor",
|
||||
"BlockErrorMonitor",
|
||||
"LateExecutionMonitor",
|
||||
"LateExecutionException",
|
||||
"NotificationJobArgs",
|
||||
"report_execution_accuracy_alerts",
|
||||
"report_block_error_rates",
|
||||
"report_late_executions",
|
||||
"process_existing_batches",
|
||||
|
||||
107
autogpt_platform/backend/backend/monitoring/accuracy_monitor.py
Normal file
107
autogpt_platform/backend/backend/monitoring/accuracy_monitor.py
Normal file
@@ -0,0 +1,107 @@
"""Execution accuracy monitoring module."""

import logging

from backend.util.clients import (
    get_database_manager_client,
    get_notification_manager_client,
)
from backend.util.metrics import DiscordChannel, sentry_capture_error
from backend.util.settings import Config

logger = logging.getLogger(__name__)
config = Config()


class AccuracyMonitor:
    """Monitor execution accuracy trends and send alerts for drops."""

    def __init__(self, drop_threshold: float = 10.0):
        self.config = config
        self.notification_client = get_notification_manager_client()
        self.database_client = get_database_manager_client()
        self.drop_threshold = drop_threshold

    def check_execution_accuracy_alerts(self) -> str:
        """Check marketplace agents for accuracy drops and send alerts."""
        try:
            logger.info("Checking execution accuracy for marketplace agents")

            # Get marketplace graphs using database client
            graphs = self.database_client.get_marketplace_graphs_for_monitoring(
                days_back=30, min_executions=10
            )

            alerts_found = 0

            for graph_data in graphs:
                result = self.database_client.get_accuracy_trends_and_alerts(
                    graph_id=graph_data.graph_id,
                    user_id=graph_data.user_id,
                    days_back=21,  # 3 weeks
                    drop_threshold=self.drop_threshold,
                )

                if result.alert:
                    alert = result.alert

                    # Get graph details for better alert info
                    try:
                        graph_info = self.database_client.get_graph_metadata(
                            graph_id=alert.graph_id
                        )
                        graph_name = graph_info.name if graph_info else "Unknown Agent"
                    except Exception:
                        graph_name = "Unknown Agent"

                    # Create detailed alert message
                    alert_msg = (
                        f"🚨 **AGENT ACCURACY DROP DETECTED**\n\n"
                        f"**Agent:** {graph_name}\n"
                        f"**Graph ID:** `{alert.graph_id}`\n"
                        f"**Accuracy Drop:** {alert.drop_percent:.1f}%\n"
                        f"**Recent Performance:**\n"
                        f"  • 3-day average: {alert.three_day_avg:.1f}%\n"
                        f"  • 7-day average: {alert.seven_day_avg:.1f}%\n"
                    )

                    if alert.user_id:
                        alert_msg += f"**Owner:** {alert.user_id}\n"

                    # Send individual alert for each agent (not batched)
                    self.notification_client.discord_system_alert(
                        alert_msg, DiscordChannel.PRODUCT
                    )
                    alerts_found += 1
                    logger.warning(
                        f"Sent accuracy alert for agent: {graph_name} ({alert.graph_id})"
                    )

            if alerts_found > 0:
                return f"Alert sent for {alerts_found} agents with accuracy drops"

            logger.info("No execution accuracy alerts detected")
            return "No accuracy alerts detected"

        except Exception as e:
            logger.exception(f"Error checking execution accuracy alerts: {e}")

            error = Exception(f"Error checking execution accuracy alerts: {e}")
            msg = str(error)
            sentry_capture_error(error)
            self.notification_client.discord_system_alert(msg, DiscordChannel.PRODUCT)
            return msg


def report_execution_accuracy_alerts(drop_threshold: float = 10.0) -> str:
    """
    Check execution accuracy and send alerts if drops are detected.

    Args:
        drop_threshold: Percentage drop threshold to trigger alerts (default 10.0%)

    Returns:
        Status message indicating results of the check
    """
    monitor = AccuracyMonitor(drop_threshold=drop_threshold)
    return monitor.check_execution_accuracy_alerts()
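Reviewer note: a minimal sketch of how the module-level entry point above could be exercised from a periodic job. The import path and signature come from the diff itself; the wrapper shown here is illustrative and not part of this change.

from backend.monitoring.accuracy_monitor import report_execution_accuracy_alerts

def run_accuracy_check() -> None:
    # Uses the default 10% drop threshold; returns a human-readable status string.
    status_msg = report_execution_accuracy_alerts(drop_threshold=10.0)
    print(status_msg)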
@@ -143,6 +143,9 @@ def instrument_fastapi(
    )

    # Create instrumentator with default metrics
    # Use service-specific inprogress_name to avoid duplicate registration
    # when multiple FastAPI apps are instrumented in the same process
    service_subsystem = service_name.replace("-", "_")
    instrumentator = Instrumentator(
        should_group_status_codes=True,
        should_ignore_untemplated=True,
@@ -150,7 +153,7 @@ def instrument_fastapi(
        should_instrument_requests_inprogress=True,
        excluded_handlers=excluded_handlers or ["/health", "/readiness"],
        env_var_name="ENABLE_METRICS",
        inprogress_name="autogpt_http_requests_inprogress",
        inprogress_name=f"autogpt_{service_subsystem}_http_requests_inprogress",
        inprogress_labels=True,
    )
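Background on the inprogress_name change above: prometheus_client keeps one default registry per process, so instrumenting two FastAPI apps with the same gauge name fails on the second registration. A minimal sketch of the failure mode, assuming only that prometheus_client is installed:

from prometheus_client import Gauge

Gauge("autogpt_http_requests_inprogress", "In-progress HTTP requests")
try:
    # Registering the same metric name again in the same process raises
    Gauge("autogpt_http_requests_inprogress", "In-progress HTTP requests")
except ValueError as err:
    print(f"duplicate registration rejected: {err}")

Deriving the name from service_name gives each instrumented app its own timeseries instead of colliding in the shared registry.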
@@ -3,6 +3,8 @@ from fastapi import FastAPI
from backend.monitoring.instrumentation import instrument_fastapi
from backend.server.middleware.security import SecurityHeadersMiddleware

from .routes.integrations import integrations_router
from .routes.tools import tools_router
from .routes.v1 import v1_router

external_app = FastAPI(
@@ -14,6 +16,8 @@ external_app = FastAPI(

external_app.add_middleware(SecurityHeadersMiddleware)
external_app.include_router(v1_router, prefix="/v1")
external_app.include_router(tools_router, prefix="/v1")
external_app.include_router(integrations_router, prefix="/v1")

# Add Prometheus instrumentation
instrument_fastapi(
650
autogpt_platform/backend/backend/server/external/routes/integrations.py
vendored
Normal file
@@ -0,0 +1,650 @@
"""
External API endpoints for integrations and credentials.

This module provides endpoints for external applications (like Autopilot) to:
- Initiate OAuth flows with custom callback URLs
- Complete OAuth flows by exchanging authorization codes
- Create API key, user/password, and host-scoped credentials
- List and manage user credentials
"""

import logging
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Union
from urllib.parse import urlparse

from fastapi import APIRouter, Body, HTTPException, Path, Security, status
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field, SecretStr

from backend.data.api_key import APIKeyInfo
from backend.data.model import (
    APIKeyCredentials,
    Credentials,
    CredentialsType,
    HostScopedCredentials,
    OAuth2Credentials,
    UserPasswordCredentials,
)
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
from backend.integrations.providers import ProviderName
from backend.server.external.middleware import require_permission
from backend.server.integrations.models import get_all_provider_names
from backend.util.settings import Settings

if TYPE_CHECKING:
    from backend.integrations.oauth import BaseOAuthHandler

logger = logging.getLogger(__name__)
settings = Settings()
creds_manager = IntegrationCredentialsManager()

integrations_router = APIRouter(prefix="/integrations", tags=["integrations"])


# ==================== Request/Response Models ==================== #


class OAuthInitiateRequest(BaseModel):
    """Request model for initiating an OAuth flow."""

    callback_url: str = Field(
        ..., description="The external app's callback URL for OAuth redirect"
    )
    scopes: list[str] = Field(
        default_factory=list, description="OAuth scopes to request"
    )
    state_metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Arbitrary metadata to echo back on completion",
    )


class OAuthInitiateResponse(BaseModel):
    """Response model for OAuth initiation."""

    login_url: str = Field(..., description="URL to redirect user for OAuth consent")
    state_token: str = Field(..., description="State token for CSRF protection")
    expires_at: int = Field(
        ..., description="Unix timestamp when the state token expires"
    )


class OAuthCompleteRequest(BaseModel):
    """Request model for completing an OAuth flow."""

    code: str = Field(..., description="Authorization code from OAuth provider")
    state_token: str = Field(..., description="State token from initiate request")


class OAuthCompleteResponse(BaseModel):
    """Response model for OAuth completion."""

    credentials_id: str = Field(..., description="ID of the stored credentials")
    provider: str = Field(..., description="Provider name")
    type: str = Field(..., description="Credential type (oauth2)")
    title: Optional[str] = Field(None, description="Credential title")
    scopes: list[str] = Field(default_factory=list, description="Granted scopes")
    username: Optional[str] = Field(None, description="Username from provider")
    state_metadata: dict[str, Any] = Field(
        default_factory=dict, description="Echoed metadata from initiate request"
    )


class CredentialSummary(BaseModel):
    """Summary of a credential without sensitive data."""

    id: str
    provider: str
    type: CredentialsType
    title: Optional[str] = None
    scopes: Optional[list[str]] = None
    username: Optional[str] = None
    host: Optional[str] = None


class ProviderInfo(BaseModel):
    """Information about an integration provider."""

    name: str
    supports_oauth: bool = False
    supports_api_key: bool = False
    supports_user_password: bool = False
    supports_host_scoped: bool = False
    default_scopes: list[str] = Field(default_factory=list)


# ==================== Credential Creation Models ==================== #


class CreateAPIKeyCredentialRequest(BaseModel):
    """Request model for creating API key credentials."""

    type: Literal["api_key"] = "api_key"
    api_key: str = Field(..., description="The API key")
    title: str = Field(..., description="A name for this credential")
    expires_at: Optional[int] = Field(
        None, description="Unix timestamp when the API key expires"
    )


class CreateUserPasswordCredentialRequest(BaseModel):
    """Request model for creating username/password credentials."""

    type: Literal["user_password"] = "user_password"
    username: str = Field(..., description="Username")
    password: str = Field(..., description="Password")
    title: str = Field(..., description="A name for this credential")


class CreateHostScopedCredentialRequest(BaseModel):
    """Request model for creating host-scoped credentials."""

    type: Literal["host_scoped"] = "host_scoped"
    host: str = Field(..., description="Host/domain pattern to match")
    headers: dict[str, str] = Field(..., description="Headers to include in requests")
    title: str = Field(..., description="A name for this credential")


# Union type for credential creation
CreateCredentialRequest = Annotated[
    CreateAPIKeyCredentialRequest
    | CreateUserPasswordCredentialRequest
    | CreateHostScopedCredentialRequest,
    Field(discriminator="type"),
]


class CreateCredentialResponse(BaseModel):
    """Response model for credential creation."""

    id: str
    provider: str
    type: CredentialsType
    title: Optional[str] = None


# ==================== Helper Functions ==================== #


def validate_callback_url(callback_url: str) -> bool:
    """Validate that the callback URL is from an allowed origin."""
    allowed_origins = settings.config.external_oauth_callback_origins

    try:
        parsed = urlparse(callback_url)
        callback_origin = f"{parsed.scheme}://{parsed.netloc}"

        for allowed in allowed_origins:
            # Simple origin matching
            if callback_origin == allowed:
                return True

        # Allow localhost with any port in development (proper hostname check)
        if parsed.hostname == "localhost":
            for allowed in allowed_origins:
                allowed_parsed = urlparse(allowed)
                if allowed_parsed.hostname == "localhost":
                    return True

        return False
    except Exception:
        return False


def _get_oauth_handler_for_external(
    provider_name: str, redirect_uri: str
) -> "BaseOAuthHandler":
    """Get an OAuth handler configured with an external redirect URI."""
    # Ensure blocks are loaded so SDK providers are available
    try:
        from backend.blocks import load_all_blocks

        load_all_blocks()
    except Exception as e:
        logger.warning(f"Failed to load blocks: {e}")

    if provider_name not in HANDLERS_BY_NAME:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Provider '{provider_name}' does not support OAuth",
        )

    # Check if this provider has custom OAuth credentials
    oauth_credentials = CREDENTIALS_BY_PROVIDER.get(provider_name)

    if oauth_credentials and not oauth_credentials.use_secrets:
        import os

        client_id = (
            os.getenv(oauth_credentials.client_id_env_var)
            if oauth_credentials.client_id_env_var
            else None
        )
        client_secret = (
            os.getenv(oauth_credentials.client_secret_env_var)
            if oauth_credentials.client_secret_env_var
            else None
        )
    else:
        client_id = getattr(settings.secrets, f"{provider_name}_client_id", None)
        client_secret = getattr(
            settings.secrets, f"{provider_name}_client_secret", None
        )

    if not (client_id and client_secret):
        logger.error(f"Attempt to use unconfigured {provider_name} OAuth integration")
        raise HTTPException(
            status_code=status.HTTP_501_NOT_IMPLEMENTED,
            detail={
                "message": f"Integration with provider '{provider_name}' is not configured.",
                "hint": "Set client ID and secret in the application's deployment environment",
            },
        )

    handler_class = HANDLERS_BY_NAME[provider_name]
    return handler_class(
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=redirect_uri,
    )


# ==================== Endpoints ==================== #


@integrations_router.get("/providers", response_model=list[ProviderInfo])
async def list_providers(
    api_key: APIKeyInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> list[ProviderInfo]:
    """
    List all available integration providers.

    Returns a list of all providers with their supported credential types.
    Most providers support API key credentials, and some also support OAuth.
    """
    # Ensure blocks are loaded
    try:
        from backend.blocks import load_all_blocks

        load_all_blocks()
    except Exception as e:
        logger.warning(f"Failed to load blocks: {e}")

    from backend.sdk.registry import AutoRegistry

    providers = []
    for name in get_all_provider_names():
        supports_oauth = name in HANDLERS_BY_NAME
        handler_class = HANDLERS_BY_NAME.get(name)
        default_scopes = (
            getattr(handler_class, "DEFAULT_SCOPES", []) if handler_class else []
        )

        # Check if provider has specific auth types from SDK registration
        sdk_provider = AutoRegistry.get_provider(name)
        if sdk_provider and sdk_provider.supported_auth_types:
            supports_api_key = "api_key" in sdk_provider.supported_auth_types
            supports_user_password = (
                "user_password" in sdk_provider.supported_auth_types
            )
            supports_host_scoped = "host_scoped" in sdk_provider.supported_auth_types
        else:
            # Fallback for legacy providers
            supports_api_key = True  # All providers can accept API keys
            supports_user_password = name in ("smtp",)
            supports_host_scoped = name == "http"

        providers.append(
            ProviderInfo(
                name=name,
                supports_oauth=supports_oauth,
                supports_api_key=supports_api_key,
                supports_user_password=supports_user_password,
                supports_host_scoped=supports_host_scoped,
                default_scopes=default_scopes,
            )
        )

    return providers


@integrations_router.post(
    "/{provider}/oauth/initiate",
    response_model=OAuthInitiateResponse,
    summary="Initiate OAuth flow",
)
async def initiate_oauth(
    provider: Annotated[str, Path(title="The OAuth provider")],
    request: OAuthInitiateRequest,
    api_key: APIKeyInfo = Security(
        require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
    ),
) -> OAuthInitiateResponse:
    """
    Initiate an OAuth flow for an external application.

    This endpoint allows external apps to start an OAuth flow with a custom
    callback URL. The callback URL must be from an allowed origin configured
    in the platform settings.

    Returns a login URL to redirect the user to, along with a state token
    for CSRF protection.
    """
    # Validate callback URL
    if not validate_callback_url(request.callback_url):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Callback URL origin is not allowed. Allowed origins: {settings.config.external_oauth_callback_origins}",
        )

    # Validate provider
    try:
        provider_name = ProviderName(provider)
    except ValueError:
        # Check if it's a dynamically registered provider
        if provider not in HANDLERS_BY_NAME:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Provider '{provider}' not found",
            )
        provider_name = provider

    # Get OAuth handler with external callback URL
    handler = _get_oauth_handler_for_external(
        provider if isinstance(provider_name, str) else provider_name.value,
        request.callback_url,
    )

    # Store state token with external flow metadata
    state_token, code_challenge = await creds_manager.store.store_state_token(
        user_id=api_key.user_id,
        provider=provider if isinstance(provider_name, str) else provider_name.value,
        scopes=request.scopes,
        callback_url=request.callback_url,
        state_metadata=request.state_metadata,
        initiated_by_api_key_id=api_key.id,
    )

    # Build login URL
    login_url = handler.get_login_url(
        request.scopes, state_token, code_challenge=code_challenge
    )

    # Calculate expiration (10 minutes from now)
    from datetime import datetime, timedelta, timezone

    expires_at = int((datetime.now(timezone.utc) + timedelta(minutes=10)).timestamp())

    return OAuthInitiateResponse(
        login_url=login_url,
        state_token=state_token,
        expires_at=expires_at,
    )


@integrations_router.post(
    "/{provider}/oauth/complete",
    response_model=OAuthCompleteResponse,
    summary="Complete OAuth flow",
)
async def complete_oauth(
    provider: Annotated[str, Path(title="The OAuth provider")],
    request: OAuthCompleteRequest,
    api_key: APIKeyInfo = Security(
        require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
    ),
) -> OAuthCompleteResponse:
    """
    Complete an OAuth flow by exchanging the authorization code for tokens.

    This endpoint should be called after the user has authorized the application
    and been redirected back to the external app's callback URL with an
    authorization code.
    """
    # Verify state token
    valid_state = await creds_manager.store.verify_state_token(
        api_key.user_id, request.state_token, provider
    )

    if not valid_state:
        logger.warning(f"Invalid or expired state token for provider {provider}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid or expired state token",
        )

    # Verify this is an external flow (callback_url must be set)
    if not valid_state.callback_url:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="State token was not created for external OAuth flow",
        )

    # Get OAuth handler with the original callback URL
    handler = _get_oauth_handler_for_external(provider, valid_state.callback_url)

    try:
        scopes = valid_state.scopes
        scopes = handler.handle_default_scopes(scopes)

        credentials = await handler.exchange_code_for_tokens(
            request.code, scopes, valid_state.code_verifier
        )

        # Handle Linear's space-separated scopes
        if len(credentials.scopes) == 1 and " " in credentials.scopes[0]:
            credentials.scopes = credentials.scopes[0].split(" ")

        # Check scope mismatch
        if not set(scopes).issubset(set(credentials.scopes)):
            logger.warning(
                f"Granted scopes {credentials.scopes} for provider {provider} "
                f"do not include all requested scopes {scopes}"
            )

    except Exception as e:
        logger.error(f"OAuth2 Code->Token exchange failed for provider {provider}: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"OAuth2 callback failed to exchange code for tokens: {str(e)}",
        )

    # Store credentials
    await creds_manager.create(api_key.user_id, credentials)

    logger.info(f"Successfully completed external OAuth for provider {provider}")

    return OAuthCompleteResponse(
        credentials_id=credentials.id,
        provider=credentials.provider,
        type=credentials.type,
        title=credentials.title,
        scopes=credentials.scopes,
        username=credentials.username,
        state_metadata=valid_state.state_metadata,
    )


@integrations_router.get("/credentials", response_model=list[CredentialSummary])
async def list_credentials(
    api_key: APIKeyInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> list[CredentialSummary]:
    """
    List all credentials for the authenticated user.

    Returns metadata about each credential without exposing sensitive tokens.
    """
    credentials = await creds_manager.store.get_all_creds(api_key.user_id)
    return [
        CredentialSummary(
            id=cred.id,
            provider=cred.provider,
            type=cred.type,
            title=cred.title,
            scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
            username=cred.username if isinstance(cred, OAuth2Credentials) else None,
            host=cred.host if isinstance(cred, HostScopedCredentials) else None,
        )
        for cred in credentials
    ]


@integrations_router.get(
    "/{provider}/credentials", response_model=list[CredentialSummary]
)
async def list_credentials_by_provider(
    provider: Annotated[str, Path(title="The provider to list credentials for")],
    api_key: APIKeyInfo = Security(
        require_permission(APIKeyPermission.READ_INTEGRATIONS)
    ),
) -> list[CredentialSummary]:
    """
    List credentials for a specific provider.
    """
    credentials = await creds_manager.store.get_creds_by_provider(
        api_key.user_id, provider
    )
    return [
        CredentialSummary(
            id=cred.id,
            provider=cred.provider,
            type=cred.type,
            title=cred.title,
            scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
            username=cred.username if isinstance(cred, OAuth2Credentials) else None,
            host=cred.host if isinstance(cred, HostScopedCredentials) else None,
        )
        for cred in credentials
    ]


@integrations_router.post(
    "/{provider}/credentials",
    response_model=CreateCredentialResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Create credentials",
)
async def create_credential(
    provider: Annotated[str, Path(title="The provider to create credentials for")],
    request: Union[
        CreateAPIKeyCredentialRequest,
        CreateUserPasswordCredentialRequest,
        CreateHostScopedCredentialRequest,
    ] = Body(..., discriminator="type"),
    api_key: APIKeyInfo = Security(
        require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
    ),
) -> CreateCredentialResponse:
    """
    Create non-OAuth credentials for a provider.

    Supports creating:
    - API key credentials (type: "api_key")
    - Username/password credentials (type: "user_password")
    - Host-scoped credentials (type: "host_scoped")

    For OAuth credentials, use the OAuth initiate/complete flow instead.
    """
    # Validate provider exists
    all_providers = get_all_provider_names()
    if provider not in all_providers:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Provider '{provider}' not found",
        )

    # Create the appropriate credential type
    credentials: Credentials
    if request.type == "api_key":
        credentials = APIKeyCredentials(
            provider=provider,
            api_key=SecretStr(request.api_key),
            title=request.title,
            expires_at=request.expires_at,
        )
    elif request.type == "user_password":
        credentials = UserPasswordCredentials(
            provider=provider,
            username=SecretStr(request.username),
            password=SecretStr(request.password),
            title=request.title,
        )
    elif request.type == "host_scoped":
        # Convert string headers to SecretStr
        secret_headers = {k: SecretStr(v) for k, v in request.headers.items()}
        credentials = HostScopedCredentials(
            provider=provider,
            host=request.host,
            headers=secret_headers,
            title=request.title,
        )
    else:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Unsupported credential type: {request.type}",
        )

    # Store credentials
    try:
        await creds_manager.create(api_key.user_id, credentials)
    except Exception as e:
        logger.error(f"Failed to store credentials: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to store credentials: {str(e)}",
        )

    logger.info(f"Created {request.type} credentials for provider {provider}")

    return CreateCredentialResponse(
        id=credentials.id,
        provider=provider,
        type=credentials.type,
        title=credentials.title,
    )


class DeleteCredentialResponse(BaseModel):
    """Response model for deleting a credential."""

    deleted: bool = Field(..., description="Whether the credential was deleted")
    credentials_id: str = Field(..., description="ID of the deleted credential")


@integrations_router.delete(
    "/{provider}/credentials/{cred_id}",
    response_model=DeleteCredentialResponse,
)
async def delete_credential(
    provider: Annotated[str, Path(title="The provider")],
    cred_id: Annotated[str, Path(title="The credential ID to delete")],
    api_key: APIKeyInfo = Security(
        require_permission(APIKeyPermission.DELETE_INTEGRATIONS)
    ),
) -> DeleteCredentialResponse:
    """
    Delete a credential.

    Note: This does not revoke the tokens with the provider. For full cleanup,
    use the main API's delete endpoint which handles webhook cleanup and
    token revocation.
    """
    creds = await creds_manager.store.get_creds_by_id(api_key.user_id, cred_id)
    if not creds:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Credentials not found"
        )
    if creds.provider != provider:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Credentials do not match the specified provider",
        )

    await creds_manager.delete(api_key.user_id, cred_id)

    return DeleteCredentialResponse(deleted=True, credentials_id=cred_id)
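End to end, the two OAuth endpoints above are meant to be driven by the external app roughly as follows. This is a sketch under assumptions: the base URL, API key header name, provider slug, and callback handling are placeholders, not defined by this diff.

import requests  # illustrative client; any HTTP client works

BASE = "https://platform.example.com/external-api/v1"  # hypothetical mount point
HEADERS = {"X-API-Key": "…"}  # header name is an assumption

# 1) Initiate: obtain a provider login URL plus a CSRF state token.
init = requests.post(
    f"{BASE}/integrations/github/oauth/initiate",
    json={"callback_url": "https://app.example.com/oauth/callback", "scopes": []},
    headers=HEADERS,
).json()

# 2) Redirect the user to init["login_url"]; the provider sends them back
#    to the callback URL with ?code=…

# 3) Complete: exchange the authorization code, echoing the state token back.
done = requests.post(
    f"{BASE}/integrations/github/oauth/complete",
    json={"code": "<code from callback>", "state_token": init["state_token"]},
    headers=HEADERS,
).json()
print(done["credentials_id"])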
148
autogpt_platform/backend/backend/server/external/routes/tools.py
vendored
Normal file
@@ -0,0 +1,148 @@
"""External API routes for chat tools - stateless HTTP endpoints.

Note: These endpoints use ephemeral sessions that are not persisted to Redis.
As a result, session-based rate limiting (max_agent_runs, max_agent_schedules)
is not enforced for external API calls. Each request creates a fresh session
with zeroed counters. Rate limiting for external API consumers should be
handled separately (e.g., via API key quotas).
"""

import logging
from typing import Any

from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field

from backend.data.api_key import APIKeyInfo
from backend.server.external.middleware import require_permission
from backend.server.v2.chat.model import ChatSession
from backend.server.v2.chat.tools import find_agent_tool, run_agent_tool
from backend.server.v2.chat.tools.models import ToolResponseBase

logger = logging.getLogger(__name__)

tools_router = APIRouter(prefix="/tools", tags=["tools"])

# Note: We use Security() as a function parameter dependency (api_key: APIKeyInfo = Security(...))
# rather than in the decorator's dependencies= list. This avoids duplicate permission checks
# while still enforcing auth AND giving us access to the api_key for extracting user_id.


# Request models
class FindAgentRequest(BaseModel):
    query: str = Field(..., description="Search query for finding agents")


class RunAgentRequest(BaseModel):
    """Request to run or schedule an agent.

    The tool automatically handles the setup flow:
    - First call returns available inputs so user can decide what values to use
    - Returns missing credentials if user needs to configure them
    - Executes when inputs are provided OR use_defaults=true
    - Schedules execution if schedule_name and cron are provided
    """

    username_agent_slug: str = Field(
        ...,
        description="The marketplace agent slug (e.g., 'username/agent-name')",
    )
    inputs: dict[str, Any] = Field(
        default_factory=dict,
        description="Dictionary of input values for the agent",
    )
    use_defaults: bool = Field(
        default=False,
        description="Set to true to run with default values (user must confirm)",
    )
    schedule_name: str | None = Field(
        None,
        description="Name for scheduled execution (triggers scheduling mode)",
    )
    cron: str | None = Field(
        None,
        description="Cron expression (5 fields: minute hour day month weekday)",
    )
    timezone: str = Field(
        default="UTC",
        description="IANA timezone (e.g., 'America/New_York', 'UTC')",
    )


def _create_ephemeral_session(user_id: str | None) -> ChatSession:
    """Create an ephemeral session for stateless API requests."""
    return ChatSession.new(user_id)


@tools_router.post(
    path="/find-agent",
)
async def find_agent(
    request: FindAgentRequest,
    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.USE_TOOLS)),
) -> dict[str, Any]:
    """
    Search for agents in the marketplace based on capabilities and user needs.

    Args:
        request: Search query for finding agents

    Returns:
        List of matching agents or no results response
    """
    session = _create_ephemeral_session(api_key.user_id)
    result = await find_agent_tool._execute(
        user_id=api_key.user_id,
        session=session,
        query=request.query,
    )
    return _response_to_dict(result)


@tools_router.post(
    path="/run-agent",
)
async def run_agent(
    request: RunAgentRequest,
    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.USE_TOOLS)),
) -> dict[str, Any]:
    """
    Run or schedule an agent from the marketplace.

    The endpoint automatically handles the setup flow:
    - Returns missing inputs if required fields are not provided
    - Returns missing credentials if user needs to configure them
    - Executes immediately if all requirements are met
    - Schedules execution if schedule_name and cron are provided

    For scheduled execution:
    - Cron format: "minute hour day month weekday"
    - Examples: "0 9 * * 1-5" (9am weekdays), "0 0 * * *" (daily at midnight)
    - Timezone: Use IANA timezone names like "America/New_York"

    Args:
        request: Agent slug, inputs, and optional schedule config

    Returns:
        - setup_requirements: If inputs or credentials are missing
        - execution_started: If agent was run or scheduled successfully
        - error: If something went wrong
    """
    session = _create_ephemeral_session(api_key.user_id)
    result = await run_agent_tool._execute(
        user_id=api_key.user_id,
        session=session,
        username_agent_slug=request.username_agent_slug,
        inputs=request.inputs,
        use_defaults=request.use_defaults,
        schedule_name=request.schedule_name or "",
        cron=request.cron or "",
        timezone=request.timezone,
    )
    return _response_to_dict(result)


def _response_to_dict(result: ToolResponseBase) -> dict[str, Any]:
    """Convert a tool response to a dictionary for JSON serialization."""
    return result.model_dump()
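A sketch of how the /tools/run-agent endpoint above could be called to schedule a marketplace agent. The base URL, auth header, and agent slug are placeholders, and the response shape follows the docstring above rather than a verified contract.

import requests  # illustrative; any HTTP client works

resp = requests.post(
    "https://platform.example.com/external-api/v1/tools/run-agent",  # hypothetical mount
    json={
        "username_agent_slug": "example-user/example-agent",  # placeholder slug
        "inputs": {},
        "use_defaults": True,
        # Both schedule fields present -> scheduling mode, per the docstring:
        "schedule_name": "weekday-morning-run",
        "cron": "0 9 * * 1-5",  # 9am on weekdays
        "timezone": "America/New_York",
    },
    headers={"X-API-Key": "…"},  # header name is an assumption
)
print(resp.json())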
@@ -1,12 +1,15 @@
import logging
import urllib.parse
from collections import defaultdict
from typing import Annotated, Any, Optional, Sequence
from typing import Annotated, Any, Literal, Optional, Sequence

from fastapi import APIRouter, Body, HTTPException, Security
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict

import backend.data.block
import backend.server.v2.store.cache as store_cache
import backend.server.v2.store.model as store_model
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKeyInfo
@@ -106,10 +109,6 @@ async def get_graph_execution_results(
    graph_exec_id: str,
    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
    graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
    if not graph:
        raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")

    graph_exec = await execution_db.get_graph_execution(
        user_id=api_key.user_id,
        execution_id=graph_exec_id,
@@ -120,6 +119,13 @@ async def get_graph_execution_results(
            status_code=404, detail=f"Graph execution #{graph_exec_id} not found."
        )

    if not await graph_db.get_graph(
        graph_id=graph_exec.graph_id,
        version=graph_exec.graph_version,
        user_id=api_key.user_id,
    ):
        raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")

    return GraphExecutionResult(
        execution_id=graph_exec_id,
        status=graph_exec.status.value,
@@ -141,3 +147,149 @@ async def get_graph_execution_results(
            else None
        ),
    )


##############################################
############### Store Endpoints ##############
##############################################
@v1_router.get(
    path="/store/agents",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.StoreAgentsResponse,
)
async def get_store_agents(
    featured: bool = False,
    creator: str | None = None,
    sorted_by: Literal["rating", "runs", "name", "updated_at"] | None = None,
    search_query: str | None = None,
    category: str | None = None,
    page: int = 1,
    page_size: int = 20,
) -> store_model.StoreAgentsResponse:
    """
    Get a paginated list of agents from the store with optional filtering and sorting.

    Args:
        featured: Filter to only show featured agents
        creator: Filter agents by creator username
        sorted_by: Sort agents by "runs", "rating", "name", or "updated_at"
        search_query: Search agents by name, subheading and description
        category: Filter agents by category
        page: Page number for pagination (default 1)
        page_size: Number of agents per page (default 20)

    Returns:
        StoreAgentsResponse: Paginated list of agents matching the filters
    """
    if page < 1:
        raise HTTPException(status_code=422, detail="Page must be greater than 0")

    if page_size < 1:
        raise HTTPException(status_code=422, detail="Page size must be greater than 0")

    agents = await store_cache._get_cached_store_agents(
        featured=featured,
        creator=creator,
        sorted_by=sorted_by,
        search_query=search_query,
        category=category,
        page=page,
        page_size=page_size,
    )
    return agents


@v1_router.get(
    path="/store/agents/{username}/{agent_name}",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.StoreAgentDetails,
)
async def get_store_agent(
    username: str,
    agent_name: str,
) -> store_model.StoreAgentDetails:
    """
    Get details of a specific store agent by username and agent name.

    Args:
        username: Creator's username
        agent_name: Name/slug of the agent

    Returns:
        StoreAgentDetails: Detailed information about the agent
    """
    username = urllib.parse.unquote(username).lower()
    agent_name = urllib.parse.unquote(agent_name).lower()
    agent = await store_cache._get_cached_agent_details(
        username=username, agent_name=agent_name
    )
    return agent


@v1_router.get(
    path="/store/creators",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.CreatorsResponse,
)
async def get_store_creators(
    featured: bool = False,
    search_query: str | None = None,
    sorted_by: Literal["agent_rating", "agent_runs", "num_agents"] | None = None,
    page: int = 1,
    page_size: int = 20,
) -> store_model.CreatorsResponse:
    """
    Get a paginated list of store creators with optional filtering and sorting.

    Args:
        featured: Filter to only show featured creators
        search_query: Search creators by profile description
        sorted_by: Sort by "agent_rating", "agent_runs", or "num_agents"
        page: Page number for pagination (default 1)
        page_size: Number of creators per page (default 20)

    Returns:
        CreatorsResponse: Paginated list of creators matching the filters
    """
    if page < 1:
        raise HTTPException(status_code=422, detail="Page must be greater than 0")

    if page_size < 1:
        raise HTTPException(status_code=422, detail="Page size must be greater than 0")

    creators = await store_cache._get_cached_store_creators(
        featured=featured,
        search_query=search_query,
        sorted_by=sorted_by,
        page=page,
        page_size=page_size,
    )
    return creators


@v1_router.get(
    path="/store/creators/{username}",
    tags=["store"],
    dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
    response_model=store_model.CreatorDetails,
)
async def get_store_creator(
    username: str,
) -> store_model.CreatorDetails:
    """
    Get details of a specific store creator by username.

    Args:
        username: Creator's username

    Returns:
        CreatorDetails: Detailed information about the creator
    """
    username = urllib.parse.unquote(username).lower()
    creator = await store_cache._get_cached_creator_details(username=username)
    return creator
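For reference, the store listing endpoint added above takes its filters as query parameters; a hedged example request (base URL and auth header are placeholders):

import requests  # illustrative; any HTTP client works

resp = requests.get(
    "https://platform.example.com/external-api/v1/store/agents",  # hypothetical mount
    params={"sorted_by": "rating", "page": 1, "page_size": 20},
    headers={"X-API-Key": "…"},  # header name is an assumption
)
# page/page_size below 1 are rejected with HTTP 422 by the handlers above
print(resp.status_code, resp.json())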
@@ -33,7 +33,11 @@ from backend.data.model import (
    OAuth2Credentials,
    UserIntegrations,
)
from backend.data.onboarding import complete_webhook_trigger_step
from backend.data.onboarding import (
    OnboardingStep,
    complete_onboarding_step,
    increment_runs,
)
from backend.data.user import get_user_integrations
from backend.executor.utils import add_graph_execution
from backend.integrations.ayrshare import AyrshareClient, SocialPlatform
@@ -376,7 +380,8 @@ async def webhook_ingress_generic(
    if not (webhook.triggered_nodes or webhook.triggered_presets):
        return

    await complete_webhook_trigger_step(user_id)
    await complete_onboarding_step(user_id, OnboardingStep.TRIGGER_WEBHOOK)
    await increment_runs(user_id)

    # Execute all triggers concurrently for better performance
    tasks = []
@@ -1,7 +1,8 @@
import enum
from typing import Any, Optional
from typing import Any, Literal, Optional

import pydantic
from prisma.enums import OnboardingStep

from backend.data.api_key import APIKeyInfo, APIKeyPermission
from backend.data.graph import Graph
@@ -35,8 +36,13 @@ class WSSubscribeGraphExecutionsRequest(pydantic.BaseModel):
    graph_id: str


GraphCreationSource = Literal["builder", "upload"]
GraphExecutionSource = Literal["builder", "library", "onboarding"]


class CreateGraph(pydantic.BaseModel):
    graph: Graph
    source: GraphCreationSource | None = None


class CreateAPIKeyRequest(pydantic.BaseModel):
@@ -83,6 +89,8 @@ class NotificationPayload(pydantic.BaseModel):
    type: str
    event: str

    model_config = pydantic.ConfigDict(extra="allow")


class OnboardingNotificationPayload(NotificationPayload):
    step: str
    step: OnboardingStep | None
@@ -29,13 +29,13 @@ import backend.server.v2.admin.store_admin_routes
import backend.server.v2.builder
import backend.server.v2.builder.routes
import backend.server.v2.chat.routes as chat_routes
import backend.server.v2.executions.review.routes
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
import backend.server.v2.otto.routes
import backend.server.v2.store.model
import backend.server.v2.store.routes
import backend.server.v2.turnstile.routes
import backend.util.service
import backend.util.settings
from backend.blocks.llm import LlmModel
@@ -275,17 +275,17 @@ app.include_router(
    tags=["v2", "admin"],
    prefix="/api/executions",
)
app.include_router(
    backend.server.v2.executions.review.routes.router,
    tags=["v2", "executions", "review"],
    prefix="/api/review",
)
app.include_router(
    backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
)
app.include_router(
    backend.server.v2.otto.routes.router, tags=["v2", "otto"], prefix="/api/otto"
)
app.include_router(
    backend.server.v2.turnstile.routes.router,
    tags=["v2", "turnstile"],
    prefix="/api/turnstile",
)

app.include_router(
    backend.server.routers.postmark.postmark.router,
@@ -5,7 +5,7 @@ import time
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from typing import Annotated, Any, Sequence
from typing import Annotated, Any, Sequence, get_args

import pydantic
import stripe
@@ -44,13 +44,18 @@ from backend.data.credit import (
    get_user_credit_model,
    set_auto_top_up,
)
from backend.data.execution import UserContext
from backend.data.model import CredentialsMetaInput
from backend.data.graph import GraphSettings
from backend.data.model import CredentialsMetaInput, UserOnboarding
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
from backend.data.onboarding import (
    FrontendOnboardingStep,
    OnboardingStep,
    UserOnboardingUpdate,
    complete_onboarding_step,
    complete_re_run_agent,
    get_recommended_agents,
    get_user_onboarding,
    increment_runs,
    onboarding_enabled,
    reset_user_onboarding,
    update_user_onboarding,
@@ -78,6 +83,7 @@ from backend.server.model import (
    CreateAPIKeyRequest,
    CreateAPIKeyResponse,
    CreateGraph,
    GraphExecutionSource,
    RequestTopUp,
    SetGraphActiveVersion,
    TimezoneResponse,
@@ -85,6 +91,7 @@ from backend.server.model import (
    UpdateTimezoneRequest,
    UploadFileResponse,
)
from backend.server.v2.store.model import StoreAgentDetails
from backend.util.cache import cached
from backend.util.clients import get_scheduler_client
from backend.util.cloud_storage import get_cloud_storage_handler
@@ -143,6 +150,28 @@ async def hide_activity_summary_if_disabled(
    return execution


async def _update_library_agent_version_and_settings(
    user_id: str, agent_graph: graph_db.GraphModel
) -> library_db.library_model.LibraryAgent:
    # Keep the library agent up to date with the new active version
    library = await library_db.update_agent_version_in_library(
        user_id, agent_graph.id, agent_graph.version
    )
    # If the graph has HITL node, initialize the setting if it's not already set.
    if (
        agent_graph.has_human_in_the_loop
        and library.settings.human_in_the_loop_safe_mode is None
    ):
        await library_db.update_library_agent_settings(
            user_id=user_id,
            agent_id=library.id,
            settings=library.settings.model_copy(
                update={"human_in_the_loop_safe_mode": True}
            ),
        )
    return library


# Define the API routes
v1_router = APIRouter()

@@ -252,9 +281,10 @@ async def update_preferences(

@v1_router.get(
    "/onboarding",
    summary="Get onboarding status",
    summary="Onboarding state",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
    response_model=UserOnboarding,
)
async def get_onboarding(user_id: Annotated[str, Security(get_user_id)]):
    return await get_user_onboarding(user_id)
@@ -262,9 +292,10 @@ async def get_onboarding(user_id: Annotated[str, Security(get_user_id)]):

@v1_router.patch(
    "/onboarding",
    summary="Update onboarding progress",
    summary="Update onboarding state",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
    response_model=UserOnboarding,
)
async def update_onboarding(
    user_id: Annotated[str, Security(get_user_id)], data: UserOnboardingUpdate
@@ -272,25 +303,39 @@ async def update_onboarding(
    return await update_user_onboarding(user_id, data)


@v1_router.post(
    "/onboarding/step",
    summary="Complete onboarding step",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
)
async def onboarding_complete_step(
    user_id: Annotated[str, Security(get_user_id)], step: FrontendOnboardingStep
):
    if step not in get_args(FrontendOnboardingStep):
        raise HTTPException(status_code=400, detail="Invalid onboarding step")
    return await complete_onboarding_step(user_id, step)


@v1_router.get(
    "/onboarding/agents",
    summary="Get recommended agents",
    summary="Recommended onboarding agents",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
)
async def get_onboarding_agents(
    user_id: Annotated[str, Security(get_user_id)],
):
) -> list[StoreAgentDetails]:
    return await get_recommended_agents(user_id)


@v1_router.get(
    "/onboarding/enabled",
    summary="Check onboarding enabled",
    summary="Is onboarding enabled",
    tags=["onboarding", "public"],
    dependencies=[Security(requires_user)],
)
async def is_onboarding_enabled():
async def is_onboarding_enabled() -> bool:
    return await onboarding_enabled()

@@ -299,6 +344,7 @@ async def is_onboarding_enabled():
    summary="Reset onboarding progress",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
    response_model=UserOnboarding,
)
async def reset_onboarding(user_id: Annotated[str, Security(get_user_id)]):
    return await reset_user_onboarding(user_id)
@@ -387,19 +433,15 @@ async def execute_graph_block(
    if not obj:
        raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")

    # Get user context for block execution
    user = await get_user_by_id(user_id)
    if not user:
        raise HTTPException(status_code=404, detail="User not found.")

    user_context = UserContext(timezone=user.timezone)

    start_time = time.time()
    try:
        output = defaultdict(list)
        async for name, data in obj.execute(
            data,
            user_context=user_context,
            user_id=user_id,
            # Note: graph_exec_id and graph_id are not available for direct block execution
        ):
@@ -791,7 +833,12 @@ async def create_new_graph(
    # as the graph is already valid and no sub-graphs are returned back.
    await graph_db.create_graph(graph, user_id=user_id)
    await library_db.create_library_agent(graph, user_id=user_id)
    return await on_graph_activate(graph, user_id=user_id)
    activated_graph = await on_graph_activate(graph, user_id=user_id)

    if create_graph.source == "builder":
        await complete_onboarding_step(user_id, OnboardingStep.BUILDER_SAVE_AGENT)

    return activated_graph


@v1_router.delete(
@@ -803,7 +850,9 @@ async def create_new_graph(
async def delete_graph(
    graph_id: str, user_id: Annotated[str, Security(get_user_id)]
) -> DeleteGraphResponse:
    if active_version := await graph_db.get_graph(graph_id, user_id=user_id):
    if active_version := await graph_db.get_graph(
        graph_id=graph_id, version=None, user_id=user_id
    ):
        await on_graph_deactivate(active_version, user_id=user_id)

    return {"version_counts": await graph_db.delete_graph(graph_id, user_id=user_id)}
@@ -840,9 +889,7 @@ async def update_graph(

    if new_graph_version.is_active:
        # Keep the library agent up to date with the new active version
        await library_db.update_agent_version_in_library(
            user_id, graph.id, graph.version
        )
        await _update_library_agent_version_and_settings(user_id, new_graph_version)

        # Handle activation of the new graph first to ensure continuity
        new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id)
@@ -883,7 +930,11 @@ async def set_graph_active_version(
    if not new_active_graph:
        raise HTTPException(404, f"Graph #{graph_id} v{new_active_version} not found")

    current_active_graph = await graph_db.get_graph(graph_id, user_id=user_id)
    current_active_graph = await graph_db.get_graph(
        graph_id=graph_id,
        version=None,
        user_id=user_id,
    )

    # Handle activation of the new graph first to ensure continuity
    await on_graph_activate(new_active_graph, user_id=user_id)
@@ -895,15 +946,43 @@ async def set_graph_active_version(
    )

    # Keep the library agent up to date with the new active version
    await library_db.update_agent_version_in_library(
        user_id, new_active_graph.id, new_active_graph.version
    )
    await _update_library_agent_version_and_settings(user_id, new_active_graph)

    if current_active_graph and current_active_graph.version != new_active_version:
        # Handle deactivation of the previously active version
        await on_graph_deactivate(current_active_graph, user_id=user_id)


@v1_router.patch(
    path="/graphs/{graph_id}/settings",
    summary="Update graph settings",
    tags=["graphs"],
    dependencies=[Security(requires_user)],
)
async def update_graph_settings(
    graph_id: str,
    settings: GraphSettings,
    user_id: Annotated[str, Security(get_user_id)],
) -> GraphSettings:
    """Update graph settings for the user's library agent."""
    # Get the library agent for this graph
    library_agent = await library_db.get_library_agent_by_graph_id(
        graph_id=graph_id, user_id=user_id
    )
    if not library_agent:
        raise HTTPException(404, f"Graph #{graph_id} not found in user's library")

    # Update the library agent settings
    updated_agent = await library_db.update_library_agent_settings(
        user_id=user_id,
        agent_id=library_agent.id,
        settings=settings,
    )

    # Return the updated settings
    return GraphSettings.model_validate(updated_agent.settings)


@v1_router.post(
    path="/graphs/{graph_id}/execute/{graph_version}",
    summary="Execute graph agent",
@@ -917,6 +996,7 @@ async def execute_graph(
    credentials_inputs: Annotated[
        dict[str, CredentialsMetaInput], Body(..., embed=True, default_factory=dict)
    ],
    source: Annotated[GraphExecutionSource | None, Body(embed=True)] = None,
    graph_version: Optional[int] = None,
    preset_id: Optional[str] = None,
) -> execution_db.GraphExecutionMeta:
@@ -940,6 +1020,14 @@ async def execute_graph(
        # Record successful graph execution
        record_graph_execution(graph_id=graph_id, status="success", user_id=user_id)
        record_graph_operation(operation="execute", status="success")
        await increment_runs(user_id)
        await complete_re_run_agent(user_id, graph_id)
        if source == "library":
            await complete_onboarding_step(
                user_id, OnboardingStep.MARKETPLACE_RUN_AGENT
            )
        elif source == "builder":
            await complete_onboarding_step(user_id, OnboardingStep.BUILDER_RUN_AGENT)
        return result
    except GraphValidationError as e:
        # Record failed graph execution
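The new `source` field above is an embedded body parameter; a sketch of a request that would mark the BUILDER_RUN_AGENT onboarding step on success (URL, path, and auth scheme are placeholders):

import requests  # illustrative; any HTTP client works

resp = requests.post(
    "https://platform.example.com/api/graphs/<graph_id>/execute/1",  # hypothetical mount/path
    json={
        "credentials_inputs": {},  # embedded body param, per Body(..., embed=True)
        "source": "builder",       # one of "builder" | "library" | "onboarding"
        # other body fields of this endpoint are elided in the hunk above
    },
    headers={"Authorization": "Bearer <token>"},  # auth scheme is an assumption
)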
@@ -1053,6 +1141,15 @@ async def list_graph_executions(
    filtered_executions = await hide_activity_summaries_if_disabled(
        paginated_result.executions, user_id
    )
    onboarding = await get_user_onboarding(user_id)
    if (
        onboarding.onboardingAgentExecutionId
        and onboarding.onboardingAgentExecutionId
        in [exec.id for exec in filtered_executions]
        and OnboardingStep.GET_RESULTS not in onboarding.completedSteps
    ):
        await complete_onboarding_step(user_id, OnboardingStep.GET_RESULTS)

    return execution_db.GraphExecutionsPaginated(
        executions=filtered_executions, pagination=paginated_result.pagination
    )
@@ -1069,24 +1166,33 @@ async def get_graph_execution(
    graph_exec_id: str,
    user_id: Annotated[str, Security(get_user_id)],
) -> execution_db.GraphExecution | execution_db.GraphExecutionWithNodes:
    graph = await graph_db.get_graph(graph_id=graph_id, user_id=user_id)
    if not graph:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND, detail=f"Graph #{graph_id} not found"
        )

    result = await execution_db.get_graph_execution(
        user_id=user_id,
        execution_id=graph_exec_id,
        include_node_executions=graph.user_id == user_id,
        include_node_executions=True,
    )
    if not result or result.graph_id != graph_id:
        raise HTTPException(
            status_code=404, detail=f"Graph execution #{graph_exec_id} not found."
        )

    if not await graph_db.get_graph(
        graph_id=result.graph_id,
        version=result.graph_version,
        user_id=user_id,
    ):
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND, detail=f"Graph #{graph_id} not found"
        )

    # Apply feature flags to filter out disabled features
    result = await hide_activity_summary_if_disabled(result, user_id)
    onboarding = await get_user_onboarding(user_id)
    if (
        onboarding.onboardingAgentExecutionId == graph_exec_id
        and OnboardingStep.GET_RESULTS not in onboarding.completedSteps
    ):
        await complete_onboarding_step(user_id, OnboardingStep.GET_RESULTS)

    return result

@@ -1263,6 +1369,8 @@ async def create_graph_execution_schedule(
        result.next_run_time, user_timezone
    )

    await complete_onboarding_step(user_id, OnboardingStep.SCHEDULE_AGENT)

    return result
@@ -7,6 +7,11 @@ from autogpt_libs.auth import get_user_id, requires_admin_user
from fastapi import APIRouter, HTTPException, Security
from pydantic import BaseModel, Field

from backend.blocks.llm import LlmModel
from backend.data.analytics import (
    AccuracyTrendsResponse,
    get_accuracy_trends_and_alerts,
)
from backend.data.execution import (
    ExecutionStatus,
    GraphExecutionMeta,
@@ -15,6 +20,8 @@ from backend.data.execution import (
)
from backend.data.model import GraphExecutionStats
from backend.executor.activity_status_generator import (
    DEFAULT_SYSTEM_PROMPT,
    DEFAULT_USER_PROMPT,
    generate_activity_status_for_execution,
)
from backend.executor.manager import get_db_async_client
@@ -30,12 +37,21 @@ class ExecutionAnalyticsRequest(BaseModel):
    created_after: Optional[datetime] = Field(
        None, description="Optional created date lower bound"
    )
    model_name: Optional[str] = Field(
        "gpt-4o-mini", description="Model to use for generation"
    )
    model_name: str = Field("gpt-4o-mini", description="Model to use for generation")
    batch_size: int = Field(
        10, description="Batch size for concurrent processing", le=25, ge=1
    )
    system_prompt: Optional[str] = Field(
        None, description="Custom system prompt (default: built-in prompt)"
    )
    user_prompt: Optional[str] = Field(
        None,
        description="Custom user prompt with {{GRAPH_NAME}} and {{EXECUTION_DATA}} placeholders (default: built-in prompt)",
    )
    skip_existing: bool = Field(
        True,
        description="Whether to skip executions that already have activity status and correctness score",
    )


class ExecutionAnalyticsResult(BaseModel):
@@ -58,6 +74,31 @@ class ExecutionAnalyticsResponse(BaseModel):
    results: list[ExecutionAnalyticsResult]


class ModelInfo(BaseModel):
    value: str
    label: str
    provider: str


class ExecutionAnalyticsConfig(BaseModel):
    available_models: list[ModelInfo]
    default_system_prompt: str
    default_user_prompt: str
    recommended_model: str


class AccuracyTrendsRequest(BaseModel):
    graph_id: str = Field(..., description="Graph ID to analyze", min_length=1)
    user_id: Optional[str] = Field(None, description="Optional user ID filter")
    days_back: int = Field(30, description="Number of days to look back", ge=7, le=90)
    drop_threshold: float = Field(
        10.0, description="Alert threshold percentage", ge=1.0, le=50.0
    )
    include_historical: bool = Field(
        False, description="Include historical data for charts"
    )


router = APIRouter(
    prefix="/admin",
    tags=["admin", "execution_analytics"],
@@ -65,6 +106,100 @@ router = APIRouter(
)


@router.get(
    "/execution_analytics/config",
    response_model=ExecutionAnalyticsConfig,
    summary="Get Execution Analytics Configuration",
)
async def get_execution_analytics_config(
    admin_user_id: str = Security(get_user_id),
):
    """
    Get the configuration for execution analytics including:
    - Available AI models with metadata
    - Default system and user prompts
    - Recommended model selection
    """
    logger.info(f"Admin user {admin_user_id} requesting execution analytics config")

    # Generate model list from LlmModel enum with provider information
    available_models = []

    # Function to generate friendly display names from model values
    def generate_model_label(model: LlmModel) -> str:
        """Generate a user-friendly label from the model enum value."""
        value = model.value

        # For all models, convert underscores/hyphens to spaces and title case
        # e.g., "gpt-4-turbo" -> "GPT 4 Turbo", "claude-3-haiku-20240307" -> "Claude 3 Haiku"
        parts = value.replace("_", "-").split("-")

        # Handle provider prefixes (e.g., "google/", "x-ai/")
        if "/" in value:
            _, model_name = value.split("/", 1)
            parts = model_name.replace("_", "-").split("-")
||||
|
||||
# Capitalize and format parts
|
||||
formatted_parts = []
|
||||
for part in parts:
|
||||
# Skip date-like patterns - check for various date formats:
|
||||
# - Long dates like "20240307" (8 digits)
|
||||
# - Year components like "2024", "2025" (4 digit years >= 2020)
|
||||
# - Month/day components like "04", "16" when they appear to be dates
|
||||
if part.isdigit():
|
||||
if len(part) >= 8: # Long date format like "20240307"
|
||||
continue
|
||||
elif len(part) == 4 and int(part) >= 2020: # Year like "2024", "2025"
|
||||
continue
|
||||
elif len(part) <= 2 and int(part) <= 31: # Month/day like "04", "16"
|
||||
# Skip if this looks like a date component (basic heuristic)
|
||||
continue
|
||||
# Keep version numbers as-is
|
||||
if part.replace(".", "").isdigit():
|
||||
formatted_parts.append(part)
|
||||
# Capitalize normal words
|
||||
else:
|
||||
formatted_parts.append(
|
||||
part.upper()
|
||||
if part.upper() in ["GPT", "LLM", "API", "V0"]
|
||||
else part.capitalize()
|
||||
)
|
||||
|
||||
model_name = " ".join(formatted_parts)
|
||||
|
||||
# Format provider name for better display
|
||||
provider_name = model.provider.replace("_", " ").title()
|
||||
|
||||
# Return with provider prefix for clarity
|
||||
return f"{provider_name}: {model_name}"
|
||||
|
||||
# Include all LlmModel values (no more filtering by hardcoded list)
|
||||
recommended_model = LlmModel.GPT4O_MINI.value
|
||||
for model in LlmModel:
|
||||
label = generate_model_label(model)
|
||||
# Add "(Recommended)" suffix to the recommended model
|
||||
if model.value == recommended_model:
|
||||
label += " (Recommended)"
|
||||
|
||||
available_models.append(
|
||||
ModelInfo(
|
||||
value=model.value,
|
||||
label=label,
|
||||
provider=model.provider,
|
||||
)
|
||||
)
|
||||
|
||||
# Sort models by provider and name for better UX
|
||||
available_models.sort(key=lambda x: (x.provider, x.label))
|
||||
|
||||
return ExecutionAnalyticsConfig(
|
||||
available_models=available_models,
|
||||
default_system_prompt=DEFAULT_SYSTEM_PROMPT,
|
||||
default_user_prompt=DEFAULT_USER_PROMPT,
|
||||
recommended_model=recommended_model,
|
||||
)
|
||||
|
||||
|
||||
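For reviewers, a condensed standalone rerun of the labeling heuristic above; the model values and providers are illustrative samples, not the LlmModel enum. Note that the date filter also swallows bare single-digit version parts, so the "3" in "claude-3-haiku" is dropped despite the in-code example comment:

# Condensed sketch of generate_model_label; sample values only.
def label(value: str, provider: str) -> str:
    if "/" in value:
        value = value.split("/", 1)[1]
    parts = []
    for part in value.replace("_", "-").split("-"):
        if part.isdigit() and (
            len(part) >= 8                             # dates like "20240307"
            or (len(part) == 4 and int(part) >= 2020)  # years
            or (len(part) <= 2 and int(part) <= 31)    # month/day-like digits
        ):
            continue
        parts.append(
            part.upper() if part.upper() in ["GPT", "LLM", "API", "V0"] else part.capitalize()
        )
    return f"{provider.replace('_', ' ').title()}: {' '.join(parts)}"

print(label("gpt-4.1", "openai"))                     # Openai: GPT 4.1 (dotted versions survive)
print(label("claude-3-haiku-20240307", "anthropic"))  # Anthropic: Claude Haiku (the bare "3" is dropped too)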
@router.post(
    "/execution_analytics",
    response_model=ExecutionAnalyticsResponse,
@@ -100,6 +235,7 @@ async def generate_execution_analytics(
    # Fetch executions to process
    executions = await get_graph_executions(
        graph_id=request.graph_id,
        graph_version=request.graph_version,
        user_id=request.user_id,
        created_time_gte=request.created_after,
        statuses=[
@@ -113,21 +249,20 @@ async def generate_execution_analytics(
        f"Found {len(executions)} total executions for graph {request.graph_id}"
    )

    # Filter executions that need analytics generation (missing activity_status or correctness_score)
    # Filter executions that need analytics generation
    executions_to_process = []
    for execution in executions:
        # Skip if we should skip existing analytics and both activity_status and correctness_score exist
        if (
            not execution.stats
            or not execution.stats.activity_status
            or execution.stats.correctness_score is None
            request.skip_existing
            and execution.stats
            and execution.stats.activity_status
            and execution.stats.correctness_score is not None
        ):
            continue

        # If version is specified, filter by it
        if (
            request.graph_version is None
            or execution.graph_version == request.graph_version
        ):
            executions_to_process.append(execution)
        # Add execution to processing list
        executions_to_process.append(execution)

    logger.info(
        f"Found {len(executions_to_process)} executions needing analytics generation"
@@ -152,9 +287,7 @@ async def generate_execution_analytics(
            f"Processing batch {batch_idx + 1}/{total_batches} with {len(batch)} executions"
        )

        batch_results = await _process_batch(
            batch, request.model_name or "gpt-4o-mini", db_client
        )
        batch_results = await _process_batch(batch, request, db_client)

        for result in batch_results:
            results.append(result)
@@ -212,7 +345,7 @@ async def generate_execution_analytics(


async def _process_batch(
    executions, model_name: str, db_client
    executions, request: ExecutionAnalyticsRequest, db_client
) -> list[ExecutionAnalyticsResult]:
    """Process a batch of executions concurrently."""

@@ -237,8 +370,11 @@ async def _process_batch(
            db_client=db_client,
            user_id=execution.user_id,
            execution_status=execution.status,
            model_name=model_name,  # Pass model name parameter
            model_name=request.model_name,
            skip_feature_flag=True,  # Admin endpoint bypasses feature flags
            system_prompt=request.system_prompt or DEFAULT_SYSTEM_PROMPT,
            user_prompt=request.user_prompt or DEFAULT_USER_PROMPT,
            skip_existing=request.skip_existing,
        )

        if not activity_response:
@@ -299,3 +435,40 @@ async def _process_batch(
    return await asyncio.gather(
        *[process_single_execution(execution) for execution in executions]
    )

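The batching shape used by generate_execution_analytics and _process_batch above (sequential batches, concurrent items within each batch via asyncio.gather) in a minimal self-contained form; the worker here is a stand-in for process_single_execution:

import asyncio

# Sketch only: batches run one after another, items inside a batch concurrently.
async def process_in_batches(items, batch_size, worker):
    results = []
    for i in range(0, len(items), batch_size):
        batch = items[i : i + batch_size]
        results.extend(await asyncio.gather(*(worker(x) for x in batch)))
    return results

async def main():
    async def worker(x):
        await asyncio.sleep(0)  # pretend work
        return x * 2

    print(await process_in_batches(list(range(7)), 3, worker))  # [0, 2, 4, 6, 8, 10, 12]

asyncio.run(main())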
@router.get(
    "/execution_accuracy_trends",
    response_model=AccuracyTrendsResponse,
    summary="Get Execution Accuracy Trends and Alerts",
)
async def get_execution_accuracy_trends(
    graph_id: str,
    user_id: Optional[str] = None,
    days_back: int = 30,
    drop_threshold: float = 10.0,
    include_historical: bool = False,
    admin_user_id: str = Security(get_user_id),
) -> AccuracyTrendsResponse:
    """
    Get execution accuracy trends with moving averages and alert detection.
    Simple single-query approach.
    """
    logger.info(
        f"Admin user {admin_user_id} requesting accuracy trends for graph {graph_id}"
    )

    try:
        result = await get_accuracy_trends_and_alerts(
            graph_id=graph_id,
            days_back=days_back,
            user_id=user_id,
            drop_threshold=drop_threshold,
            include_historical=include_historical,
        )

        return result

    except Exception as e:
        logger.exception(f"Error getting accuracy trends for graph {graph_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))

@@ -1,29 +1,56 @@
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from difflib import SequenceMatcher
from typing import Sequence

import prisma

import backend.data.block
import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.db as store_db
import backend.server.v2.store.model as store_model
from backend.blocks import load_all_blocks
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.server.v2.builder.model import (
    BlockCategoryResponse,
    BlockResponse,
    BlockType,
    CountResponse,
    FilterType,
    Provider,
    ProviderResponse,
    SearchBlocksResponse,
    SearchEntry,
)
from backend.util.cache import cached
from backend.util.models import Pagination

logger = logging.getLogger(__name__)
llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
_static_counts_cache: dict | None = None
_suggested_blocks: list[BlockInfo] | None = None

MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0

SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent


@dataclass
class _ScoredItem:
    item: SearchResultItem
    filter_type: FilterType
    score: float
    sort_key: str


@dataclass
class _SearchCacheEntry:
    items: list[SearchResultItem]
    total_items: dict[FilterType, int]


def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]:
@@ -129,71 +156,244 @@ def get_block_by_id(block_id: str) -> BlockInfo | None:
    return None


def search_blocks(
    include_blocks: bool = True,
    include_integrations: bool = True,
    query: str = "",
    page: int = 1,
    page_size: int = 50,
) -> SearchBlocksResponse:
async def update_search(user_id: str, search: SearchEntry) -> str:
    """
    Get blocks based on the filter and query.
    `providers` only applies for `integrations` filter.
    Upsert a search request for the user and return the search ID.
    """
    blocks: list[AnyBlockSchema] = []
    query = query.lower()
    if search.search_id:
        # Update existing search
        await prisma.models.BuilderSearchHistory.prisma().update(
            where={
                "id": search.search_id,
            },
            data={
                "searchQuery": search.search_query or "",
                "filter": search.filter or [],  # type: ignore
                "byCreator": search.by_creator or [],
            },
        )
        return search.search_id
    else:
        # Create new search
        new_search = await prisma.models.BuilderSearchHistory.prisma().create(
            data={
                "userId": user_id,
                "searchQuery": search.search_query or "",
                "filter": search.filter or [],  # type: ignore
                "byCreator": search.by_creator or [],
            }
        )
        return new_search.id

    total = 0
    skip = (page - 1) * page_size
    take = page_size

async def get_recent_searches(user_id: str, limit: int = 5) -> list[SearchEntry]:
    """
    Get the user's most recent search requests.
    """
    searches = await prisma.models.BuilderSearchHistory.prisma().find_many(
        where={
            "userId": user_id,
        },
        order={
            "updatedAt": "desc",
        },
        take=limit,
    )
    return [
        SearchEntry(
            search_query=s.searchQuery,
            filter=s.filter,  # type: ignore
            by_creator=s.byCreator,
            search_id=s.id,
        )
        for s in searches
    ]


async def get_sorted_search_results(
    *,
    user_id: str,
    search_query: str | None,
    filters: Sequence[FilterType],
    by_creator: Sequence[str] | None = None,
) -> _SearchCacheEntry:
    normalized_filters: tuple[FilterType, ...] = tuple(sorted(set(filters or [])))
    normalized_creators: tuple[str, ...] = tuple(sorted(set(by_creator or [])))
    return await _build_cached_search_results(
        user_id=user_id,
        search_query=search_query or "",
        filters=normalized_filters,
        by_creator=normalized_creators,
    )


@cached(ttl_seconds=300, shared_cache=True)
async def _build_cached_search_results(
    user_id: str,
    search_query: str,
    filters: tuple[FilterType, ...],
    by_creator: tuple[str, ...],
) -> _SearchCacheEntry:
    normalized_query = (search_query or "").strip().lower()

    include_blocks = "blocks" in filters
    include_integrations = "integrations" in filters
    include_library_agents = "my_agents" in filters
    include_marketplace_agents = "marketplace_agents" in filters

    scored_items: list[_ScoredItem] = []
    total_items: dict[FilterType, int] = {
        "blocks": 0,
        "integrations": 0,
        "marketplace_agents": 0,
        "my_agents": 0,
    }

    block_results, block_total, integration_total = _collect_block_results(
        normalized_query=normalized_query,
        include_blocks=include_blocks,
        include_integrations=include_integrations,
    )
    scored_items.extend(block_results)
    total_items["blocks"] = block_total
    total_items["integrations"] = integration_total

    if include_library_agents:
        library_response = await library_db.list_library_agents(
            user_id=user_id,
            search_term=search_query or None,
            page=1,
            page_size=MAX_LIBRARY_AGENT_RESULTS,
        )
        total_items["my_agents"] = library_response.pagination.total_items
        scored_items.extend(
            _build_library_items(
                agents=library_response.agents,
                normalized_query=normalized_query,
            )
        )

    if include_marketplace_agents:
        marketplace_response = await store_db.get_store_agents(
            creators=list(by_creator) or None,
            search_query=search_query or None,
            page=1,
            page_size=MAX_MARKETPLACE_AGENT_RESULTS,
        )
        total_items["marketplace_agents"] = marketplace_response.pagination.total_items
        scored_items.extend(
            _build_marketplace_items(
                agents=marketplace_response.agents,
                normalized_query=normalized_query,
            )
        )

    sorted_items = sorted(
        scored_items,
        key=lambda entry: (-entry.score, entry.sort_key, entry.filter_type),
    )

    return _SearchCacheEntry(
        items=[entry.item for entry in sorted_items],
        total_items=total_items,
    )

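A quick illustration of why the wrapper above normalizes its arguments before calling the cached builder: sorted, de-duplicated tuples are hashable and order-stable, so equivalent requests share one cache entry. Values below are hypothetical:

# Hypothetical values; mirrors the normalization in get_sorted_search_results.
def cache_key(filters, creators):
    return (tuple(sorted(set(filters))), tuple(sorted(set(creators))))

a = cache_key(["blocks", "integrations", "blocks"], ["alice"])
b = cache_key(["integrations", "blocks"], ["alice"])
print(a == b)  # True -- both normalize to (("blocks", "integrations"), ("alice",))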
def _collect_block_results(
    *,
    normalized_query: str,
    include_blocks: bool,
    include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
    results: list[_ScoredItem] = []
    block_count = 0
    integration_count = 0

    if not include_blocks and not include_integrations:
        return results, block_count, integration_count

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't match the query
        if (
            query not in block.name.lower()
            and query not in block.description.lower()
            and not _matches_llm_model(block.input_schema, query)
        ):
            continue
        keep = False

        block_info = block.get_info()
        credentials = list(block.input_schema.get_credentials_fields().values())
        if include_integrations and len(credentials) > 0:
            keep = True
        is_integration = len(credentials) > 0

        if is_integration and not include_integrations:
            continue
        if not is_integration and not include_blocks:
            continue

        score = _score_block(block, block_info, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        filter_type: FilterType = "integrations" if is_integration else "blocks"
        if is_integration:
            integration_count += 1
            if include_blocks and len(credentials) == 0:
                keep = True
        else:
            block_count += 1

        if not keep:
        results.append(
            _ScoredItem(
                item=block_info,
                filter_type=filter_type,
                score=score,
                sort_key=_get_item_name(block_info),
            )
        )

    return results, block_count, integration_count


def _build_library_items(
    *,
    agents: list[library_model.LibraryAgent],
    normalized_query: str,
) -> list[_ScoredItem]:
    results: list[_ScoredItem] = []

    for agent in agents:
        score = _score_library_agent(agent, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        total += 1
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            blocks.append(block)
        results.append(
            _ScoredItem(
                item=agent,
                filter_type="my_agents",
                score=score,
                sort_key=_get_item_name(agent),
            )
        )

    return SearchBlocksResponse(
        blocks=BlockResponse(
            blocks=[b.get_info() for b in blocks],
            pagination=Pagination(
                total_items=total,
                total_pages=(total + page_size - 1) // page_size,
                current_page=page,
                page_size=page_size,
            ),
        ),
        total_block_count=block_count,
        total_integration_count=integration_count,
    )
    return results


def _build_marketplace_items(
    *,
    agents: list[store_model.StoreAgent],
    normalized_query: str,
) -> list[_ScoredItem]:
    results: list[_ScoredItem] = []

    for agent in agents:
        score = _score_store_agent(agent, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        results.append(
            _ScoredItem(
                item=agent,
                filter_type="marketplace_agents",
                score=score,
                sort_key=_get_item_name(agent),
            )
        )

    return results


def get_providers(
@@ -250,16 +450,12 @@ async def get_counts(user_id: str) -> CountResponse:
    )


@cached(ttl_seconds=3600)
async def _get_static_counts():
    """
    Get counts of blocks, integrations, and marketplace agents.
    This is cached to avoid unnecessary database queries and calculations.
    Can't use functools.cache here because the function is async.
    """
    global _static_counts_cache
    if _static_counts_cache is not None:
        return _static_counts_cache

    all_blocks = 0
    input_blocks = 0
    action_blocks = 0
@@ -286,7 +482,7 @@ async def _get_static_counts():

    marketplace_agents = await prisma.models.StoreAgent.prisma().count()

    _static_counts_cache = {
    return {
        "all_blocks": all_blocks,
        "input_blocks": input_blocks,
        "action_blocks": action_blocks,
@@ -295,8 +491,6 @@ async def _get_static_counts():
        "marketplace_agents": marketplace_agents,
    }

    return _static_counts_cache


def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    for field in schema_cls.model_fields.values():
@@ -307,6 +501,123 @@ def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    return False


def _score_block(
    block: AnyBlockSchema,
    block_info: BlockInfo,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = block_info.name.lower()
    description = block_info.description.lower()
    score = _score_primary_fields(name, description, normalized_query)

    category_text = " ".join(
        category.get("category", "").lower() for category in block_info.categories
    )
    score += _score_additional_field(category_text, normalized_query, 12, 6)

    credentials_info = block.input_schema.get_credentials_fields_info().values()
    provider_names = [
        provider.value.lower()
        for info in credentials_info
        for provider in info.provider
    ]
    provider_text = " ".join(provider_names)
    score += _score_additional_field(provider_text, normalized_query, 15, 6)

    if _matches_llm_model(block.input_schema, normalized_query):
        score += 20

    return score


def _score_library_agent(
    agent: library_model.LibraryAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.name.lower()
    description = (agent.description or "").lower()
    instructions = (agent.instructions or "").lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(instructions, normalized_query, 15, 6)
    score += _score_additional_field(
        agent.creator_name.lower(), normalized_query, 10, 5
    )

    return score


def _score_store_agent(
    agent: store_model.StoreAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.agent_name.lower()
    description = agent.description.lower()
    sub_heading = agent.sub_heading.lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(sub_heading, normalized_query, 12, 6)
    score += _score_additional_field(agent.creator.lower(), normalized_query, 10, 5)

    return score


def _score_primary_fields(name: str, description: str, query: str) -> float:
    score = 0.0
    if name == query:
        score += 120
    elif name.startswith(query):
        score += 90
    elif query in name:
        score += 60

    score += SequenceMatcher(None, name, query).ratio() * 50
    if description:
        if query in description:
            score += 30
        score += SequenceMatcher(None, description, query).ratio() * 25
    return score


def _score_additional_field(
    value: str,
    query: str,
    contains_weight: float,
    similarity_weight: float,
) -> float:
    if not value or not query:
        return 0.0

    score = 0.0
    if query in value:
        score += contains_weight
    score += SequenceMatcher(None, value, query).ratio() * similarity_weight
    return score

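To see roughly how these weights combine, here is a self-contained rerun of the primary-field scoring for a sample query (the sample strings are made up; SequenceMatcher is the same difflib class used above):

from difflib import SequenceMatcher

# Same weighting as _score_primary_fields above, copied for illustration.
def score_primary(name: str, description: str, query: str) -> float:
    score = 0.0
    if name == query:
        score += 120
    elif name.startswith(query):
        score += 90
    elif query in name:
        score += 60
    score += SequenceMatcher(None, name, query).ratio() * 50
    if description:
        if query in description:
            score += 30
        score += SequenceMatcher(None, description, query).ratio() * 25
    return score

# A name prefixed by "weather" clears MIN_SCORE_FOR_FILTERED_RESULTS (10.0)
# easily; an unrelated name scores close to zero and gets filtered out.
print(score_primary("weather forecast", "daily weather digest", "weather"))
print(score_primary("csv parser", "split csv rows", "weather"))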
def _should_include_item(score: float, normalized_query: str) -> bool:
    if not normalized_query:
        return True
    return score >= MIN_SCORE_FOR_FILTERED_RESULTS


def _get_item_name(item: SearchResultItem) -> str:
    if isinstance(item, BlockInfo):
        return item.name.lower()
    if isinstance(item, library_model.LibraryAgent):
        return item.name.lower()
    return item.agent_name.lower()


@cached(ttl_seconds=3600)
def _get_all_providers() -> dict[ProviderName, Provider]:
    providers: dict[ProviderName, Provider] = {}
@@ -328,25 +639,21 @@ def _get_all_providers() -> dict[ProviderName, Provider]:
    return providers


@cached(ttl_seconds=3600)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    global _suggested_blocks

    if _suggested_blocks is not None and len(_suggested_blocks) >= count:
        return _suggested_blocks[:count]

    _suggested_blocks = []
    suggested_blocks = []
    # Sum the number of executions for each block type
    # Prisma cannot group by nested relations, so we do a raw query
    # Calculate the cutoff timestamp
    timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)

    results = await prisma.get_client().query_raw(
    results = await query_raw_with_schema(
        """
        SELECT
            agent_node."agentBlockId" AS block_id,
            COUNT(execution.id) AS execution_count
        FROM "AgentNodeExecution" execution
        JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
        FROM {schema_prefix}"AgentNodeExecution" execution
        JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
        WHERE execution."endedTime" >= $1::timestamp
        GROUP BY agent_node."agentBlockId"
        ORDER BY execution_count DESC;
@@ -375,7 +682,7 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    # Sort blocks by execution count
    blocks.sort(key=lambda x: x[1], reverse=True)

    _suggested_blocks = [block[0] for block in blocks]
    suggested_blocks = [block[0] for block in blocks]

    # Return the top blocks
    return _suggested_blocks[:count]
    return suggested_blocks[:count]

@@ -18,10 +18,17 @@ FilterType = Literal[
BlockType = Literal["all", "input", "action", "output"]


class SearchEntry(BaseModel):
    search_query: str | None = None
    filter: list[FilterType] | None = None
    by_creator: list[str] | None = None
    search_id: str | None = None


# Suggestions
class SuggestionsResponse(BaseModel):
    otto_suggestions: list[str]
    recent_searches: list[str]
    recent_searches: list[SearchEntry]
    providers: list[ProviderName]
    top_blocks: list[BlockInfo]

@@ -32,7 +39,7 @@ class BlockCategoryResponse(BaseModel):
    total_blocks: int
    blocks: list[BlockInfo]

    model_config = {"use_enum_values": False}  # <== use enum names like "AI"
    model_config = {"use_enum_values": False}  # Use enum names like "AI"


# Input/Action/Output and see all for block categories
@@ -53,17 +60,11 @@ class ProviderResponse(BaseModel):
    pagination: Pagination


class SearchBlocksResponse(BaseModel):
    blocks: BlockResponse
    total_block_count: int
    total_integration_count: int


class SearchResponse(BaseModel):
    items: list[BlockInfo | library_model.LibraryAgent | store_model.StoreAgent]
    search_id: str
    total_items: dict[FilterType, int]
    page: int
    more_pages: bool
    pagination: Pagination


class CountResponse(BaseModel):

@@ -6,10 +6,6 @@ from autogpt_libs.auth.dependencies import get_user_id, requires_user

import backend.server.v2.builder.db as builder_db
import backend.server.v2.builder.model as builder_model
import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.db as store_db
import backend.server.v2.store.model as store_model
from backend.integrations.providers import ProviderName
from backend.util.models import Pagination

@@ -45,7 +41,9 @@ def sanitize_query(query: str | None) -> str | None:
    summary="Get Builder suggestions",
    response_model=builder_model.SuggestionsResponse,
)
async def get_suggestions() -> builder_model.SuggestionsResponse:
async def get_suggestions(
    user_id: Annotated[str, fastapi.Security(get_user_id)],
) -> builder_model.SuggestionsResponse:
    """
    Get all suggestions for the Blocks Menu.
    """
@@ -55,11 +53,7 @@ async def get_suggestions() -> builder_model.SuggestionsResponse:
            "Help me create a list",
            "Help me feed my data to Google Maps",
        ],
        recent_searches=[
            "image generation",
            "deepfake",
            "competitor analysis",
        ],
        recent_searches=await builder_db.get_recent_searches(user_id),
        providers=[
            ProviderName.TWITTER,
            ProviderName.GITHUB,
@@ -147,7 +141,6 @@ async def get_providers(
    )


# Not using post method because on frontend, orval doesn't support Infinite Query with POST method.
@router.get(
    "/search",
    summary="Builder search",
@@ -157,7 +150,7 @@ async def get_providers(
async def search(
    user_id: Annotated[str, fastapi.Security(get_user_id)],
    search_query: Annotated[str | None, fastapi.Query()] = None,
    filter: Annotated[list[str] | None, fastapi.Query()] = None,
    filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
    search_id: Annotated[str | None, fastapi.Query()] = None,
    by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
    page: Annotated[int, fastapi.Query()] = 1,
@@ -176,69 +169,43 @@ async def search(
    ]
    search_query = sanitize_query(search_query)

    # Blocks&Integrations
    blocks = builder_model.SearchBlocksResponse(
        blocks=builder_model.BlockResponse(
            blocks=[],
            pagination=Pagination.empty(),
        ),
        total_block_count=0,
        total_integration_count=0,
    # Get all possible results
    cached_results = await builder_db.get_sorted_search_results(
        user_id=user_id,
        search_query=search_query,
        filters=filter,
        by_creator=by_creator,
    )
    if "blocks" in filter or "integrations" in filter:
        blocks = builder_db.search_blocks(
            include_blocks="blocks" in filter,
            include_integrations="integrations" in filter,
            query=search_query or "",
            page=page,
            page_size=page_size,
        )

    # Library Agents
    my_agents = library_model.LibraryAgentResponse(
        agents=[],
        pagination=Pagination.empty(),
    # Paginate results
    total_combined_items = len(cached_results.items)
    pagination = Pagination(
        total_items=total_combined_items,
        total_pages=(total_combined_items + page_size - 1) // page_size,
        current_page=page,
        page_size=page_size,
    )
    if "my_agents" in filter:
        my_agents = await library_db.list_library_agents(
            user_id=user_id,
            search_term=search_query,
            page=page,
            page_size=page_size,
        )

    # Marketplace Agents
    marketplace_agents = store_model.StoreAgentsResponse(
        agents=[],
        pagination=Pagination.empty(),
    )
    if "marketplace_agents" in filter:
        marketplace_agents = await store_db.get_store_agents(
            creators=by_creator,
    start_idx = (page - 1) * page_size
    end_idx = start_idx + page_size
    paginated_items = cached_results.items[start_idx:end_idx]

    # Update the search entry by id
    search_id = await builder_db.update_search(
        user_id,
        builder_model.SearchEntry(
            search_query=search_query,
            page=page,
            page_size=page_size,
        )

    more_pages = False
    if (
        blocks.blocks.pagination.current_page < blocks.blocks.pagination.total_pages
        or my_agents.pagination.current_page < my_agents.pagination.total_pages
        or marketplace_agents.pagination.current_page
        < marketplace_agents.pagination.total_pages
    ):
        more_pages = True
            filter=filter,
            by_creator=by_creator,
            search_id=search_id,
        ),
    )

    return builder_model.SearchResponse(
        items=blocks.blocks.blocks + my_agents.agents + marketplace_agents.agents,
        total_items={
            "blocks": blocks.total_block_count,
            "integrations": blocks.total_integration_count,
            "marketplace_agents": marketplace_agents.pagination.total_items,
            "my_agents": my_agents.pagination.total_items,
        },
        page=page,
        more_pages=more_pages,
        items=paginated_items,
        search_id=search_id,
        total_items=cached_results.total_items,
        pagination=pagination,
    )

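The slice-based pagination the new search() performs over the cached, pre-sorted results, sanity-checked in isolation with made-up numbers:

# Made-up numbers; mirrors the pagination math in search() above.
items = list(range(23))   # pretend these are cached, pre-sorted results
page, page_size = 2, 10

total_pages = (len(items) + page_size - 1) // page_size   # ceiling division -> 3
start_idx = (page - 1) * page_size                        # 10
page_items = items[start_idx : start_idx + page_size]     # items 10..19

print(total_pages, page_items[0], page_items[-1])  # 3 10 19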
@@ -4,21 +4,29 @@ Here are the functions available to you:

<functions>
1. **find_agent** - Search for agents that solve the user's problem
2. **get_agent_details** - Get comprehensive information about the chosen agent
3. **get_required_setup_info** - Verify user has required credentials (MANDATORY before execution)
4. **schedule_agent** - Schedules the agent to run based on a cron
5. **run_agent** - Execute the agent
2. **run_agent** - Run or schedule an agent (automatically handles setup)
</functions>

## HOW run_agent WORKS

## MANDATORY WORKFLOW
The `run_agent` tool automatically handles the entire setup flow:

You must follow these 4 steps in exact order:
1. **First call** (no inputs) → Returns available inputs so user can decide what values to use
2. **Credentials check** → If missing, UI automatically prompts user to add them (you don't need to mention this)
3. **Execution** → Runs when you provide `inputs` OR set `use_defaults=true`

Parameters:
- `username_agent_slug` (required): Agent identifier like "creator/agent-name"
- `inputs`: Object with input values for the agent
- `use_defaults`: Set to `true` to run with default values (only after user confirms)
- `schedule_name` + `cron`: For scheduled execution

## WORKFLOW

1. **find_agent** - Search for agents that solve the user's problem
2. **get_agent_details** - Get comprehensive information about the chosen agent
3. **get_required_setup_info** - Verify user has required credentials (MANDATORY before execution)
4. **schedule_agent** or **run_agent** - Execute the agent
2. **run_agent** (first call, no inputs) - Get available inputs for the agent
3. **Ask user** what values they want to use OR if they want to use defaults
4. **run_agent** (second call) - Either with `inputs={...}` or `use_defaults=true`

## YOUR APPROACH

@@ -31,67 +39,66 @@ You must follow these 4 steps in exact order:
- Use `find_agent` immediately with relevant keywords
- Suggest the best option from search results
- Explain briefly how it solves their problem
- Ask if they want to use it, then move to step 3

**Step 3: Get Details**
- Use `get_agent_details` on their chosen agent
- Explain what the agent does and its requirements
- Keep explanations brief and outcome-focused
**Step 3: Get Agent Inputs**
- Call `run_agent(username_agent_slug="creator/agent-name")` without inputs
- This returns the available inputs (required and optional)
- Present these to the user and ask what values they want

**Step 4: Verify Setup (CRITICAL)**
- ALWAYS use `get_required_setup_info` before execution
- Tell user what credentials they need (if any)
- Explain that credentials are added via the frontend interface
**Step 4: Run with User's Choice**
- If user provides values: `run_agent(username_agent_slug="...", inputs={...})`
- If user says "use defaults": `run_agent(username_agent_slug="...", use_defaults=true)`
- On success, share the agent link with the user

**Step 5: Execute**
- Use `schedule_agent` for scheduled runs OR `run_agent` for immediate execution
- Confirm successful setup
- Provide clear next steps
**For Scheduled Execution:**
- Add `schedule_name` and `cron` parameters
- Example: `run_agent(username_agent_slug="...", inputs={...}, schedule_name="Daily Report", cron="0 9 * * *")`

## FUNCTION CALL FORMAT

To call a function, use this exact format:
`<function_call>function_name(parameter="value")</function_call>`

Examples:
- `<function_call>find_agent(query="social media automation")</function_call>`
- `<function_call>run_agent(username_agent_slug="creator/agent-name")</function_call>` (get inputs)
- `<function_call>run_agent(username_agent_slug="creator/agent-name", inputs={"topic": "AI news"})</function_call>`
- `<function_call>run_agent(username_agent_slug="creator/agent-name", use_defaults=true)</function_call>`

## KEY RULES

**What You DON'T Do:**
- Don't help with login (frontend handles this)
- Don't help add credentials (frontend handles this)
- Don't skip `get_required_setup_info` (mandatory before execution)
- Don't ask permission to use functions - just use them
- Don't mention or explain credentials to the user (frontend handles this automatically)
- Don't run agents without first showing available inputs to the user
- Don't use `use_defaults=true` without user explicitly confirming
- Don't write responses longer than 3 sentences
- Don't pretend to be ChatGPT

**What You DO:**
- Act fast - get to agent discovery quickly
- Use functions proactively
- Always call run_agent first without inputs to see what's available
- Ask user what values they want OR if they want to use defaults
- Keep all responses to maximum 3 sentences
- Always verify credentials before setup/run
- Focus on outcomes and value
- Maintain conversational, concise style
- Do use markdown to make your messages easier to read
- Include the agent link in your response after successful execution

**Error Handling:**
- Authentication needed → "Please sign in via the interface"
- Credentials missing → Tell user what's needed and where to add them
- Setup fails → Identify issue and provide clear fix
- Credentials missing → The UI handles this automatically. Focus on asking the user about input values instead.

## RESPONSE STRUCTURE

Before responding, wrap your analysis in <thinking> tags to systematically plan your approach:
- Identify which step of the 4-step mandatory workflow you're currently on
- Extract the key business problem or request from the user's message
- Determine what function call (if any) you need to make next
- Plan your response to stay under the 3-sentence maximum
- Consider what specific keywords or parameters you'll use for any function calls

Example interaction pattern:
Example interaction:
```
User: "I need to automate my social media posting"
Otto: Let me find social media automation agents for you. <function_call>find_agent(query="social media posting automation")</function_call> I'll show you the best options once I get the results.
User: "Run the AI news agent for me"
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news")</function_call>
[Tool returns: Agent accepts inputs - Required: topic. Optional: num_articles (default: 5)]
Otto: The AI News agent needs a topic. What topic would you like news about, or should I use the defaults?
User: "Use defaults"
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news", use_defaults=true)</function_call>
```

Respond conversationally and begin helping them find the right AutoGPT agent for their needs.

KEEP ANSWERS TO 3 SENTENCES
KEEP ANSWERS TO 3 SENTENCES

@@ -64,7 +64,10 @@ async def create_session(
        CreateSessionResponse: Details of the created session.

    """
    logger.info(f"Creating session with user_id: {user_id}")
    logger.info(
        f"Creating session with user_id: "
        f"...{user_id[-8:] if user_id and len(user_id) > 8 else '<redacted>'}"
    )

    session = await chat_service.create_chat_session(user_id)

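A quick check of the redaction expression introduced above, with a made-up ID:

# Made-up ID; mirrors the log redaction added in create_session.
user_id = "user_1234567890abcdef"
suffix = user_id[-8:] if user_id and len(user_id) > 8 else "<redacted>"
print(f"Creating session with user_id: ...{suffix}")
# -> Creating session with user_id: ...90abcdef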
@@ -6,27 +6,18 @@ from backend.server.v2.chat.model import ChatSession

from .base import BaseTool
from .find_agent import FindAgentTool
from .get_agent_details import GetAgentDetailsTool
from .get_required_setup_info import GetRequiredSetupInfoTool
from .run_agent import RunAgentTool
from .setup_agent import SetupAgentTool

if TYPE_CHECKING:
    from backend.server.v2.chat.response_model import StreamToolExecutionResult

# Initialize tool instances
find_agent_tool = FindAgentTool()
get_agent_details_tool = GetAgentDetailsTool()
get_required_setup_info_tool = GetRequiredSetupInfoTool()
setup_agent_tool = SetupAgentTool()
run_agent_tool = RunAgentTool()

# Export tools as OpenAI format
tools: list[ChatCompletionToolParam] = [
    find_agent_tool.as_openai_tool(),
    get_agent_details_tool.as_openai_tool(),
    get_required_setup_info_tool.as_openai_tool(),
    setup_agent_tool.as_openai_tool(),
    run_agent_tool.as_openai_tool(),
]

@@ -41,9 +32,6 @@ async def execute_tool(

    tool_map: dict[str, BaseTool] = {
        "find_agent": find_agent_tool,
        "get_agent_details": get_agent_details_tool,
        "get_required_setup_info": get_required_setup_info_tool,
        "schedule_agent": setup_agent_tool,
        "run_agent": run_agent_tool,
    }
    if tool_name not in tool_map: