Compare commits

1 Commit

Author SHA1 Message Date
Lluis Agusti  cc85a37305  fix(frontend): account/auth check issues  2025-11-26 23:18:04 +07:00
279 changed files with 4017 additions and 48783 deletions

View File

@@ -44,12 +44,6 @@ jobs:
         with:
           fetch-depth: 1
-      - name: Free Disk Space (Ubuntu)
-        uses: jlumbroso/free-disk-space@v1.3.1
-        with:
-          large-packages: false # slow
-          docker-images: false # limited benefit
       # Backend Python/Poetry setup (mirrors platform-backend-ci.yml)
       - name: Set up Python
        uses: actions/setup-python@v5

View File

@@ -12,10 +12,6 @@ on:
       - "autogpt_platform/frontend/**"
   merge_group:
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
-  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
-
 defaults:
   run:
     shell: bash

View File

@@ -12,10 +12,6 @@ on:
       - "autogpt_platform/**"
   merge_group:
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
-  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
-
 defaults:
   run:
     shell: bash

View File

@@ -1,4 +1,4 @@
-.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend load-store-agents
+.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend
 
 # Run just Supabase + Redis + RabbitMQ
 start-core:
@@ -42,10 +42,7 @@ run-frontend:
 test-data:
 	cd backend && poetry run python test/test_data_creator.py
 
-load-store-agents:
-	cd backend && poetry run python test/load_store_agents.py
-
 help:
 	@echo "Usage: make <target>"
 	@echo "Targets:"
@@ -57,5 +54,4 @@ help:
 	@echo " migrate - Run backend database migrations"
 	@echo " run-backend - Run the backend FastAPI server"
 	@echo " run-frontend - Run the frontend Next.js development server"
-	@echo " test-data - Run the test data creator"
-	@echo " load-store-agents - Load store agents from agents/ folder into test database"
+	@echo " test-data - Run the test data creator"

View File

@@ -1,242 +0,0 @@
listing_id,storeListingVersionId,slug,agent_name,agent_video,agent_image,featured,sub_heading,description,categories,useForOnboarding,is_available
6e60a900-9d7d-490e-9af2-a194827ed632,d85882b8-633f-44ce-a315-c20a8c123d19,flux-ai-image-generator,Flux AI Image Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ca154dd1-140e-454c-91bd-2d8a00de3f08.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/577d995d-bc38-40a9-a23f-1f30f5774bdb.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/415db1b7-115c-43ab-bd6c-4e9f7ef95be1.jpg""]",false,Transform ideas into breathtaking images,"Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!","[""creative""]",false,true
f11fc6e9-6166-4676-ac5d-f07127b270c1,c775f60d-b99f-418b-8fe0-53172258c3ce,youtube-transcription-scraper,YouTube Transcription Scraper,https://youtu.be/H8S3pU68lGE,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/65bce54b-0124-4b0d-9e3e-f9b89d0dc99e.jpg""]",false,Fetch the transcriptions from the most popular YouTube videos in your chosen topic,"Effortlessly gather transcriptions from multiple YouTube videos with this agent. It scrapes and compiles video transcripts into a clean, organized list, making it easy to extract insights, quotes, or content from various sources in one go. Ideal for researchers, content creators, and marketers looking to quickly analyze or repurpose video content.","[""writing""]",false,true
17908889-b599-4010-8e4f-bed19b8f3446,6e16e65a-ad34-4108-b4fd-4a23fced5ea2,business-ownerceo-finder,Decision Maker Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/1020d94e-b6a2-4fa7-bbdf-2c218b0de563.jpg""]",false,Contact CEOs today,"Find the key decision-makers you need, fast.
This agent identifies business owners or CEOs of local companies in any area you choose. Simply enter what kind of businesses you're looking for and where, and it will:
* Search the area and gather public information
* Return names, roles, and contact details when available
* Provide smart Google search suggestions if details aren't found
Perfect for:
* B2B sales teams seeking verified leads
* Recruiters sourcing local talent
* Researchers looking to connect with business leaders
Save hours of manual searching and get straight to the people who matter most.","[""business""]",true,true
72beca1d-45ea-4403-a7ce-e2af168ee428,415b7352-0dc6-4214-9d87-0ad3751b711d,smart-meeting-brief,Smart Meeting Prep,https://youtu.be/9ydZR2hkxaY,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2f116ce1-63ae-4d39-a5cd-f514defc2b97.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0a71a60a-2263-4f12-9836-9c76ab49f155.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/95327695-9184-403c-907a-a9d3bdafa6a5.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2bc77788-790b-47d4-8a61-ce97b695e9f5.png""]",true,Business meeting briefings delivered daily,"Never walk into a meeting unprepared again. Every day at 4 pm, the Smart Meeting Prep Agent scans your calendar for tomorrow's external meetings. It reviews your past email exchanges, researches each participant's background and role, and compiles the insights into a concise briefing, so you can close your workday ready for tomorrow's calls.
How It Works
1. At 4 pm, the agent scans your calendar and identifies external meetings scheduled for the next day.
2. It reviews recent email threads with each participant to surface key relationship history and communication context.
3. It conducts online research to gather publicly available information on roles, company backgrounds, and relevant professional data.
4. It produces a unified briefing for each participant, including past exchange highlights, profile notes, and strategic conversation points.","[""personal""]",true,true
9fa5697a-617b-4fae-aea0-7dbbed279976,b8ceb480-a7a2-4c90-8513-181a49f7071f,automated-support-ai,Automated Support Agent,https://youtu.be/nBMfu_5sgDA,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ed56febc-2205-4179-9e7e-505d8500b66c.png""]",true,Automate up to 80 percent of inbound support emails,"Overview:
Support teams spend countless hours on basic tickets. This agent automates repetitive customer support tasks. It reads incoming requests, researches your knowledge base, and responds automatically when confident. When unsure, it escalates to a human for final resolution.
How it Works:
New support emails are routed to the agent.
The agent checks internal documentation for answers.
It measures confidence in the answer found and either replies directly or escalates to a human.
Business Value:
Automating the easy 80 percent of support tickets allows your team to focus on high-value, complex customer issues, improving efficiency and response times.","[""business""]",false,true
2bdac92b-a12c-4131-bb46-0e3b89f61413,31daf49d-31d3-476b-aa4c-099abc59b458,unspirational-poster-maker,Unspirational Poster Maker,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a490dac-27e5-405f-a4c4-8d1c55b85060.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d343fbb5-478c-4e38-94df-4337293b61f1.jpg""]",false,Because adulting is hard,"This witty AI agent generates hilariously relatable ""motivational"" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clichés and embrace our collective struggles to ""get it together."" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.","[""creative""]",false,true
9adf005e-2854-4cc7-98cf-f7103b92a7b7,a03b0d8c-4751-43d6-a54e-c3b7856ba4e3,ai-shortform-video-generator-create-viral-ready-content,AI Video Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/8d2670b9-fea5-4966-a597-0a4511bffdc3.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/aabe8aec-0110-4ce7-a259-4f86fe8fe07d.png""]",false,Create Viral-Ready Shorts Content in Seconds,"OVERVIEW
Transform any trending headline or broad topic into a polished, vertical short-form video in a single run.
The agent automates research, scriptwriting, metadata creation, and Revid.ai rendering, returning one ready-to-publish MP4 plus its title, script and hashtags.
HOW IT WORKS
1. Input a topic or an exact news headline.
2. The agent fetches live search results and selects the most engaging related story.
3. Key facts are summarised into concise research notes.
4. Claude writes a 30–35 second script with visual cues, a three-second hook, tension loops, and a call-to-action.
5. GPT-4o generates an eye-catching title and one or two discoverability hashtags.
6. The script is sent to a state-of-the-art AI video generator to render a single 9:16 MP4 (default: 720 p, 30 fps, voice “Brian”, style “movingImage”, music “Bladerunner 2049”).
All voice, style and resolution settings can be adjusted in the Builder before you press ""Run"".
7. Output delivered: Title, Script, Hashtags, Video URL.
KEY USE CASES
- Broad-topic explainers (e.g. “Artificial Intelligence” or “Climate Tech”).
- Real-time newsjacking with a specific breaking headline.
- Product-launch spotlights and quick event recaps while interest is high.
BUSINESS VALUE
- One-click speed: from idea to finished video in minutes.
- Consistent brand look: Revid presets keep voice, style and aspect ratio on spec.
- No-code workflow: marketers create social video without design or development queues.
- Cloud convenience: Auto-GPT Cloud users are pre-configured with all required keys.
Self-hosted users simply add OpenAI, Anthropic, Perplexity (OpenRouter/Jina) and Revid keys once.
IMPORTANT NOTES
- The agent outputs exactly one video per execution. Run it again for additional shorts.
- Video rendering time varies; AI-generated footage may take several minutes.","[""writing""]",false,true
864e48ef-fee5-42c1-b6a4-2ae139db9fc1,55d40473-0f31-4ada-9e40-d3a7139fcbd4,automated-blog-writer,Automated SEO Blog Writer,https://youtu.be/nKcDCbDVobs,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2dd5f95b-5b30-4bf8-a11b-bac776c5141a.jpg""]",true,"Automate research, writing, and publishing for high-ranking blog posts","Scale your blog with a fully automated content engine. The Automated SEO Blog Writer learns your brand voice, finds high-demand keywords, and creates SEO-optimized articles that attract organic traffic and boost visibility.
How it works:
1. Share your pitch, website, and values.
2. The agent studies your site and uncovers proven SEO opportunities.
3. It spends two hours researching and drafting each post.
4. You set the cadence—publishing runs on autopilot.
Business value: Consistently publish research-backed, optimized posts that build domain authority, rankings, and thought leadership while you focus on what matters most.
Use cases:
• Founders: Keep your blog active with no time drain.
• Agencies: Deliver scalable SEO content for clients.
• Strategists: Automate execution, focus on strategy.
• Marketers: Drive steady organic growth.
• Local businesses: Capture nearby search traffic.","[""writing""]",false,true
6046f42e-eb84-406f-bae0-8e052064a4fa,a548e507-09a7-4b30-909c-f63fcda10fff,lead-finder-local-businesses,Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/abd6605f-d5f8-426b-af36-052e8ba5044f.webp""]",false,Auto-Prospect Like a Pro,"Turbo-charge your local lead generation with the AutoGPT Marketplace's top Google Maps prospecting agent. “Lead Finder: Local Businesses” delivers verified, ready-to-contact prospects in any niche and city—so you can focus on closing, not searching.
**WHAT IT DOES**
• Searches Google Maps via the official API (no scraping)
• Prompts like “dentists in Chicago” or “coffee shops near me”
• Returns: Name, Website, Rating, Reviews, **Phone & Address**
• Exports instantly to your CRM, sheet, or outreach workflow
**WHY YOU'LL LOVE IT**
✓ Hyper-targeted leads in minutes
✓ Unlimited searches & locations
✓ Zero CAPTCHAs or IP blocks
✓ Works on AutoGPT Cloud or self-hosted (with your API key)
✓ Cut prospecting time by 90%
**PERFECT FOR**
— Marketers & PPC agencies
— SEO consultants & designers
— SaaS founders & sales teams
Stop scrolling directories—start filling your pipeline. Start now and let AI prospect while you profit.
→ Click *Add to Library* and own your market today.","[""business""]",true,true
f623c862-24e9-44fc-8ce8-d8282bb51ad2,eafa21d3-bf14-4f63-a97f-a5ee41df83b3,linkedin-post-generator,LinkedIn Post Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/297f6a8e-81a8-43e2-b106-c7ad4a5662df.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/fceebdc1-aef6-4000-97fc-4ef587f56bda.png""]",false,Auto-craft LinkedIn gold,"Create research-driven, high-impact LinkedIn posts in minutes. This agent searches YouTube for the best videos on your chosen topic, pulls their transcripts, and distils the most valuable insights into a polished post ready for your company page or personal feed.
FEATURES
• Automated YouTube research – discovers and analyses top-ranked videos so you don't have to
• AI-curated synthesis – combines multiple transcripts into one authoritative narrative
• Full creative control – adjust style, tone, objective, opinion, clarity, target word count and number of videos
• LinkedIn-optimised output – hook, 2-3 key points, CTA, strategic line breaks, 3-5 hashtags, no markdown
• One-click publish – returns a ready-to-post text block (≤1,300 characters)
HOW IT WORKS
1. Enter a topic and your preferred writing parameters.
2. The agent builds a YouTube search, fetches the page, and extracts the top N video URLs.
3. It pulls each transcript, then feeds them—plus your settings—into Claude 3.5 Sonnet.
4. The model writes a concise, engaging post designed for maximum LinkedIn engagement.
USE CASES
• Thought-leadership updates backed by fresh video research
• Rapid industry summaries after major events, webinars, or conferences
• Consistent LinkedIn content for busy founders, marketers, and creators
WHY YOU'LL LOVE IT
Save hours of manual research, avoid surface-level hot-takes, and publish posts that showcase real expertise—without the heavy lift.","[""writing""]",true,true
7d4120ad-b6b3-4419-8bdb-7dd7d350ef32,e7bb29a1-23c7-4fee-aa3b-5426174b8c52,youtube-to-linkedin-post-converter,YouTube to LinkedIn Post Converter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f084b326-a708-4396-be51-7ba59ad2ef32.png""]",false,Transform Your YouTube Videos into Engaging LinkedIn Posts with AI,"WHAT IT DOES:
This agent converts YouTube video content into a LinkedIn post by analyzing the video's transcript. It provides you with a tailored post that reflects the core ideas, key takeaways, and tone of the original video, optimizing it for engagement on LinkedIn.
HOW IT WORKS:
- You provide the URL to the YouTube video (required)
- You can choose the structure for the LinkedIn post (e.g., Personal Achievement Story, Lesson Learned, Thought Leadership, etc.)
- You can also select the tone (e.g., Inspirational, Analytical, Conversational, etc.)
- The transcript of the video is analyzed by the GPT-4 model and the Claude 3.5 Sonnet model
- The models extract key insights, memorable quotes, and the main points from the video
- You'll receive a LinkedIn post, formatted according to your chosen structure and tone, optimized for professional engagement
INPUTS:
- Source YouTube Video – Provide the URL to the YouTube video
- Structure – Choose the post format (e.g., Personal Achievement Story, Thought Leadership, etc.)
- Content – Specify the main message or idea of the post (e.g., Hot Take, Key Takeaways, etc.)
- Tone – Select the tone for the post (e.g., Conversational, Inspirational, etc.)
OUTPUT:
- LinkedIn Post – A well-crafted, AI-generated LinkedIn post with a professional tone, based on the video content and your specified preferences
Perfect for content creators, marketers, and professionals who want to repurpose YouTube videos for LinkedIn and boost their professional branding.","[""writing""]",false,true
c61d6a83-ea48-4df8-b447-3da2d9fe5814,00fdd42c-a14c-4d19-a567-65374ea0e87f,personalized-morning-coffee-newsletter,Personal Newsletter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f4b38e4c-8166-4caf-9411-96c9c4c82d4c.png""]",false,Start your day with personalized AI newsletters that deliver credibility and context for every interest or mood.,"This Personal Newsletter Agent provides a bespoke daily digest on your favorite topics and tone. Whether you prefer industry insights, lighthearted reads, or breaking news, this agent crafts your own unique newsletter to keep you informed and entertained.
How It Works
1. Enter your favorite topics, industries, or areas of interest.
2. Choose your tone—professional, casual, or humorous.
3. Set your preferred delivery cadence: daily or weekly.
4. The agent scans top sources and compiles 3–5 engaging stories, insights, and fun facts into a conversational newsletter.
Skip the morning scroll and enjoy a thoughtfully curated newsletter designed just for you. Stay ahead of trends, spark creative ideas, and enjoy an effortless, informed start to your day.
Use Cases
• Executives: Get a daily digest of market updates and leadership insights.
• Marketers: Receive curated creative trends and campaign inspiration.
• Entrepreneurs: Stay updated on your industry without information overload.","[""research""]",true,true
e2e49cfc-4a39-4d62-a6b3-c095f6d025ff,fc2c9976-0962-4625-a27b-d316573a9e7f,email-address-finder,Email Scout - Contact Finder Assistant,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/da8a690a-7a8b-4c1d-b6f8-e2f840c0205d.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a2ac25c-1609-4881-8140-e6da2421afb3.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/26179263-fe06-45bd-b6a0-0754660a0a46.jpg""]",false,Find contact details from name and location using AI search,"Finding someone's professional email address can be time-consuming and frustrating. Manual searching across multiple websites, social profiles, and business directories often leads to dead ends or outdated information.
Email Scout automates this process by intelligently searching across publicly available sources when you provide a person's name and location. Simply input basic information like ""Tim Cook, USA"" or ""Sarah Smith, London"" and let the AI assistant do the work of finding potential contact details.
Key Features:
- Quick search from just name and location
- Scans multiple public sources
- Automated AI-powered search process
- Easy to use with simple inputs
Perfect for recruiters, business development professionals, researchers, and anyone needing to establish professional contact.
Note: This tool searches only publicly available information. Search results depend on what contact information people have made public. Some searches may not yield results if the information isn't publicly accessible.","[""""]",false,true
81bcc372-0922-4a36-bc35-f7b1e51d6939,e437cc95-e671-489d-b915-76561fba8c7f,ai-youtube-to-blog-converter,YouTube Video to SEO Blog Writer,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/239e5a41-2515-4e1c-96ef-31d0d37ecbeb.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/c7d96966-786f-4be6-ad7d-3a51c84efc0e.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0275a74c-e2c2-4e29-a6e4-3a616c3c35dd.png""]",false,One link. One click. One powerful blog post.,"Effortlessly transform your YouTube videos into high-quality, SEO-optimized blog posts.
Your videos deserve a second life—in writing.
Make your content work twice as hard by repurposing it into engaging, searchable articles.
Perfect for content creators, marketers, and bloggers, this tool analyzes video content and generates well-structured blog posts tailored to your tone, audience, and word count. Just paste a YouTube URL and let the AI handle the rest.
FEATURES
• CONTENT ANALYSIS
Extracts key points from the video while preserving your message and intent.
• CUSTOMIZABLE OUTPUT
Select a tone that fits your audience: casual, professional, educational, or formal.
• SEO OPTIMIZATION
Automatically creates engaging titles and structured subheadings for better search visibility.
• USER-FRIENDLY
Repurpose your videos into written content to expand your reach and improve accessibility.
Whether you're looking to grow your blog, boost SEO, or simply get more out of your content, the AI YouTube-to-Blog Converter makes it effortless.
","[""writing""]",true,true
5c3510d2-fc8b-4053-8e19-67f53c86eb1a,f2cc74bb-f43f-4395-9c35-ecb30b5b4fc9,ai-webpage-copy-improver,AI Webpage Copy Improver,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d562d26f-5891-4b09-8859-fbb205972313.jpg""]",false,Boost Your Website's Search Engine Performance,"Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.","[""marketing""]",true,true
94d03bd3-7d44-4d47-b60c-edb2f89508d6,b6f6f0d3-49f4-4e3b-8155-ffe9141b32c0,domain-name-finder,Domain Name Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/28545e09-b2b8-4916-b4c6-67f982510a78.jpeg""]",false,Instantly generate brand-ready domain names that are actually available,"Overview:
Finding a domain name that fits your brand shouldn't take hours of searching and failed checks. The Domain Name Finder Agent turns your pitch into hundreds of creative, brand-ready domain ideas—filtered by live availability so every result is actionable.
How It Works
1. Input your product pitch, company name, or core keywords.
2. The agent analyzes brand tone, audience, and industry context.
3. It generates a list of unique, memorable domains that match your criteria.
4. All names are pre-filtered for real-time availability, so you can register immediately.
Business Value
Save hours of guesswork and eliminate dead ends. Accelerate brand launches, startup naming, and campaign creation with ready-to-claim domains.
Key Use Cases
• Startup Founders: Quickly find brand-ready domains for MVP launches or rebrands.
• Marketers: Test name options across campaigns with instant availability data.
• Entrepreneurs: Validate ideas faster with instant domain options.","[""business""]",false,true
7a831906-daab-426f-9d66-bcf98d869426,516d813b-d1bc-470f-add7-c63a4b2c2bad,ai-function,AI Function,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/620e8117-2ee1-4384-89e6-c2ef4ec3d9c9.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/476259e2-5a79-4a7b-8e70-deeebfca70d7.png""]",false,Never Code Again,"AI FUNCTION MAGIC
Your AI-powered assistant for turning plain-English descriptions into working Python functions.
HOW IT WORKS
1. Describe what the function should do.
2. Specify the inputs it needs.
3. Receive the generated Python code.
FEATURES
- Effortless Function Generation: convert natural-language specs into complete functions.
- Customizable Inputs: define the parameters that matter to you.
- Versatile Use Cases: simulate data, automate tasks, prototype ideas.
- Seamless Integration: add the generated function directly to your codebase.
EXAMPLE
Request: “Create a function that generates 20 examples of fake people, each with a name, date of birth, job title, and age.”
Input parameter: number_of_people (default 20)
Result: a list of dictionaries such as
[
{ ""name"": ""Emma Martinez"", ""date_of_birth"": ""19921103"", ""job_title"": ""Data Analyst"", ""age"": 32 },
{ ""name"": ""Liam OConnor"", ""date_of_birth"": ""19850719"", ""job_title"": ""Marketing Manager"", ""age"": 39 },
…18 more entries…
]","[""development""]",false,true

View File

@@ -1,590 +0,0 @@
{
"id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"version": 29,
"is_active": false,
"name": "Unspirational Poster Maker",
"description": "This witty AI agent generates hilariously relatable \"motivational\" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clich\u00e9s and embrace our collective struggles to \"get it together.\" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Generated Image",
"description": "The resulting generated image ready for you to review and post."
},
"metadata": {
"position": {
"x": 2329.937006807125,
"y": 80.49068076698347
}
},
"input_links": [
{
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Theme",
"value": "Cooking"
},
"metadata": {
"position": {
"x": -1219.5966324967521,
"y": 80.50339731789956
}
},
"input_links": [],
"output_links": [
{
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"source_name": "result",
"sink_name": "prompt_values_#_THEME",
"is_static": true
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 1132.373897280427,
"y": 88.44610377514573
}
},
"input_links": [
{
"id": "54588c74-e090-4e49-89e4-844b9952a585",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 590.7543882245375,
"y": 85.69546832466654
}
},
"input_links": [
{
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 60.48904654237981,
"y": 86.06183359510214
}
},
"input_links": [
{
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
"input_default": {
"prompt": "A cat sprawled dramatically across an important-looking document during a work-from-home meeting, making direct eye contact with the camera while knocking over a coffee mug in slow motion. Text Overlay: \"Chaos is a career path. Be the obstacle everyone has to work around.\"",
"upscale": "No Upscale"
},
"metadata": {
"position": {
"x": 1668.3572666956795,
"y": 89.69665262457966
}
},
"input_links": [
{
"id": "509b7587-1940-4a06-808d-edde9a74f400",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o",
"prompt": "<example_output>\nA photo of a sloth lounging on a desk, with its head resting on a keyboard. The keyboard is on top of a laptop with a blank spreadsheet open. A to-do list is placed beside the laptop, with the top item written as \"Do literally anything\". There is a text overlay that says \"If you can't outwork them, outnap them.\".\n</example_output>\n\nCreate a relatable satirical, snarky, user-deprecating motivational style image based on the theme: \"{{THEME}}\".\n\nOutput only the image description and caption, without any additional commentary or formatting.",
"prompt_values": {}
},
"metadata": {
"position": {
"x": -561.1139207164056,
"y": 78.60434452403524
}
},
"input_links": [
{
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"source_name": "result",
"sink_name": "prompt_values_#_THEME",
"is_static": true
}
],
"output_links": [
{
"id": "54588c74-e090-4e49-89e4-844b9952a585",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "509b7587-1940-4a06-808d-edde9a74f400",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
"graph_version": 29,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "66646786-3006-4417-a6b7-0158f2603d1d",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
"source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "6524c611-774b-45e9-899d-9a6aa80c549c",
"source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "20845dda-91de-4508-8077-0504b1a5ae03",
"source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
"source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
"sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"source_name": "result",
"sink_name": "prompt_values_#_THEME",
"is_static": true
},
{
"id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "714a0821-e5ba-4af7-9432-50491adda7b1",
"source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
"sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "54588c74-e090-4e49-89e4-844b9952a585",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
},
{
"id": "509b7587-1940-4a06-808d-edde9a74f400",
"source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
"sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2024-12-20T19:58:34.390Z",
"input_schema": {
"type": "object",
"properties": {
"Theme": {
"advanced": false,
"secret": false,
"title": "Theme",
"default": "Cooking"
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Generated Image": {
"advanced": false,
"secret": false,
"title": "Generated Image",
"description": "The resulting generated image ready for you to review and post."
}
},
"required": [
"Generated Image"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"ideogram_api_key_credentials": {
"credentials_provider": [
"ideogram"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "ideogram",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.IDEOGRAM: 'ideogram'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"gpt-4o"
]
}
},
"required": [
"ideogram_api_key_credentials",
"openai_api_key_credentials"
],
"title": "UnspirationalPosterMakerCredentialsInputSchema",
"type": "object"
}
}
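Note on the format, for anyone auditing these deleted graphs: the wiring is stored twice, once per node in `input_links`/`output_links` and once in the top-level `links` array. Below is a minimal consistency check; it is a hypothetical helper (the filename and function name are illustrative, not part of the platform), assuming only the JSON layout shown above.

```python
import json

def check_link_consistency(graph: dict) -> list[str]:
    """Verify that every top-level link is mirrored in its source node's
    output_links and its sink node's input_links, and vice versa."""
    problems = []
    nodes = {n["id"]: n for n in graph["nodes"]}
    for link in graph["links"]:
        src, dst = nodes.get(link["source_id"]), nodes.get(link["sink_id"])
        if src is None or dst is None:
            problems.append(f"link {link['id']} references a missing node")
            continue
        if link not in src["output_links"]:
            problems.append(f"link {link['id']} missing from source output_links")
        if link not in dst["input_links"]:
            problems.append(f"link {link['id']} missing from sink input_links")
    # Reverse direction: per-node links must also appear in the links array.
    per_node = [l for n in graph["nodes"] for l in n["input_links"] + n["output_links"]]
    for link in per_node:
        if link not in graph["links"]:
            problems.append(f"node-level link {link['id']} missing from links array")
    return problems

with open("unspirational_poster_maker.json") as f:  # illustrative filename
    print(check_link_consistency(json.load(f)) or "links are consistent")
```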


@@ -1,447 +0,0 @@
{
"id": "622849a7-5848-4838-894d-01f8f07e3fad",
"version": 18,
"is_active": true,
"name": "AI Function",
"description": "## AI-Powered Function Magic: Never code again!\nProvide a description of a python function and your inputs and AI will provide the results.",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "return",
"title": null,
"value": null,
"format": "",
"secret": false,
"advanced": false,
"description": "The value returned by the function"
},
"metadata": {
"position": {
"x": 1598.8622921127233,
"y": 291.59140862204725
}
},
"input_links": [
{
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "o3-mini",
"retry": 3,
"prompt": "{{ARGS}}",
"sys_prompt": "You are now the following python function:\n\n```\n# {{DESCRIPTION}}\n{{FUNCTION}}\n```\n\nThe user will provide your input arguments.\nOnly respond with your `return` value.\nDo not include any commentary or additional text in your response. \nDo not include ``` backticks or any other decorators.",
"ollama_host": "localhost:11434",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 995,
"y": 290.50000000000006
}
},
"input_links": [
{
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_FUNCTION",
"is_static": true
},
{
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_ARGS",
"is_static": true
},
{
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_DESCRIPTION",
"is_static": true
}
],
"output_links": [
{
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
"input_default": {
"name": "Function Definition",
"title": null,
"value": "def fake_people(n: int) -> list[dict]:",
"secret": false,
"advanced": false,
"description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
"placeholder_values": []
},
"metadata": {
"position": {
"x": -672.6908629664215,
"y": 302.42044359789116
}
},
"input_links": [],
"output_links": [
{
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_FUNCTION",
"is_static": true
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "844530de-2354-46d8-b748-67306b7bbca1",
"block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
"input_default": {
"name": "Arguments",
"title": null,
"value": "20",
"secret": false,
"advanced": false,
"description": "The function's inputs\n\ne.g \"20\"",
"placeholder_values": []
},
"metadata": {
"position": {
"x": -158.1623599617334,
"y": 295.410856928333
}
},
"input_links": [],
"output_links": [
{
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_ARGS",
"is_static": true
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
},
{
"id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"block_id": "90a56ffb-7024-4b2b-ab50-e26c5e5ab8ba",
"input_default": {
"name": "Description",
"title": null,
"value": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.",
"secret": false,
"advanced": false,
"description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
"placeholder_values": []
},
"metadata": {
"position": {
"x": 374.4548658057796,
"y": 290.3779121974126
}
},
"input_links": [],
"output_links": [
{
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_DESCRIPTION",
"is_static": true
}
],
"graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
"graph_version": 18,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_DESCRIPTION",
"is_static": true
},
{
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_ARGS",
"is_static": true
},
{
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
"source_name": "result",
"sink_name": "prompt_values_#_FUNCTION",
"is_static": true
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2025-04-19T17:10:48.857Z",
"input_schema": {
"type": "object",
"properties": {
"Function Definition": {
"advanced": false,
"anyOf": [
{
"format": "short-text",
"type": "string"
},
{
"type": "null"
}
],
"secret": false,
"title": "Function Definition",
"description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
"default": "def fake_people(n: int) -> list[dict]:"
},
"Arguments": {
"advanced": false,
"anyOf": [
{
"format": "short-text",
"type": "string"
},
{
"type": "null"
}
],
"secret": false,
"title": "Arguments",
"description": "The function's inputs\n\ne.g \"20\"",
"default": "20"
},
"Description": {
"advanced": false,
"anyOf": [
{
"format": "long-text",
"type": "string"
},
{
"type": "null"
}
],
"secret": false,
"title": "Description",
"description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
"default": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age."
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"return": {
"advanced": false,
"secret": false,
"title": "return",
"description": "The value returned by the function"
}
},
"required": [
"return"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"o3-mini"
]
}
},
"required": [
"openai_api_key_credentials"
],
"title": "AIFunctionCredentialsInputSchema",
"type": "object"
}
}
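Reviewer note: the AI text-generator block above routes upstream values into `prompt_values` via sink names like `prompt_values_#_FUNCTION` (the part after the `#` becomes the placeholder key), and its prompts then reference those keys as `{{FUNCTION}}`, `{{ARGS}}`, and `{{DESCRIPTION}}`. A rough sketch of that substitution follows, assuming plain `{{KEY}}` templating; the platform's actual renderer may differ.

```python
import re

def render_prompt(template: str, prompt_values: dict[str, str]) -> str:
    # Replace each {{KEY}} with its value; leave unknown keys untouched.
    return re.sub(
        r"\{\{(\w+)\}\}",
        lambda m: str(prompt_values.get(m.group(1), m.group(0))),
        template,
    )

prompt_values = {
    "DESCRIPTION": "Generates n examples of fake people.",
    "FUNCTION": "def fake_people(n: int) -> list[dict]:",
    "ARGS": "20",
}
sys_prompt = "You are now the following python function:\n# {{DESCRIPTION}}\n{{FUNCTION}}"
print(render_prompt(sys_prompt, prompt_values))
```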

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1,403 +0,0 @@
{
"id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"version": 12,
"is_active": true,
"name": "Flux AI Image Generator",
"description": "Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "7482c59d-725f-4686-82b9-0dfdc4e92316",
"block_id": "cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
"input_default": {
"text": "Press the \"Advanced\" toggle and input your replicate API key.\n\nYou can get one here:\nhttps://replicate.com/account/api-tokens\n"
},
"metadata": {
"position": {
"x": 872.8268131538296,
"y": 614.9436919065381
}
},
"input_links": [],
"output_links": [],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Generated Image"
},
"metadata": {
"position": {
"x": 1453.6844137728922,
"y": 963.2466395125115
}
},
"input_links": [
{
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Image Subject",
"value": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks.",
"description": "The subject of the image"
},
"metadata": {
"position": {
"x": -314.43009631839783,
"y": 962.935949165938
}
},
"input_links": [],
"output_links": [
{
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"source_name": "result",
"sink_name": "prompt_values_#_TOPIC",
"is_static": true
}
],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"block_id": "90f8c45e-e983-4644-aa0b-b4ebe2f531bc",
"input_default": {
"prompt": "dog",
"output_format": "png",
"replicate_model_name": "Flux Pro 1.1"
},
"metadata": {
"position": {
"x": 873.0119949791526,
"y": 966.1604399052493
}
},
"input_links": [
{
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"output_links": [
{
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"source_name": "result",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o-mini",
"prompt": "Generate an incredibly detailed, photorealistic image prompt about {{TOPIC}}, describing the camera it's taken with and prompting the diffusion model to use all the best quality techniques.\n\nOutput only the prompt with no additional commentary.",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 277.3057034159709,
"y": 962.8382498113764
}
},
"input_links": [
{
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"source_name": "result",
"sink_name": "prompt_values_#_TOPIC",
"is_static": true
}
],
"output_links": [
{
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
"graph_version": 12,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"source_name": "result",
"sink_name": "prompt_values_#_TOPIC",
"is_static": true
},
{
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
"source_name": "result",
"sink_name": "value",
"is_static": false
},
{
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
"source_name": "response",
"sink_name": "prompt",
"is_static": false
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2024-12-20T18:46:11.492Z",
"input_schema": {
"type": "object",
"properties": {
"Image Subject": {
"advanced": false,
"secret": false,
"title": "Image Subject",
"description": "The subject of the image",
"default": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks."
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Generated Image": {
"advanced": false,
"secret": false,
"title": "Generated Image"
}
},
"required": [
"Generated Image"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"replicate_api_key_credentials": {
"credentials_provider": [
"replicate"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "replicate",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.REPLICATE: 'replicate'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"gpt-4o-mini"
]
}
},
"required": [
"replicate_api_key_credentials",
"openai_api_key_credentials"
],
"title": "FluxAIImageGeneratorCredentialsInputSchema",
"type": "object"
}
}


@@ -1,505 +0,0 @@
{
"id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"version": 12,
"is_active": true,
"name": "AI Webpage Copy Improver",
"description": "Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Improved Webpage Copy"
},
"metadata": {
"position": {
"x": 1039.5884372540172,
"y": -0.8359099621230968
}
},
"input_links": [
{
"id": "d4334477-3616-454f-a430-614ca27f5b36",
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Original Page Analysis",
"description": "Analysis of the webpage as it currently stands."
},
"metadata": {
"position": {
"x": 1037.7724103954706,
"y": -606.5934325506903
}
},
"input_links": [
{
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Homepage URL",
"value": "https://agpt.co",
"description": "Enter the URL of the homepage you want to improve"
},
"metadata": {
"position": {
"x": -1195.1455674454749,
"y": 0
}
},
"input_links": [],
"output_links": [
{
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"source_name": "result",
"sink_name": "url",
"is_static": true
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"block_id": "436c3984-57fd-4b85-8e9a-459b356883bd",
"input_default": {
"raw_content": false
},
"metadata": {
"position": {
"x": -631.7330786555249,
"y": 1.9638396496230826
}
},
"input_links": [
{
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"source_name": "result",
"sink_name": "url",
"is_static": true
}
],
"output_links": [
{
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o",
"prompt": "Current Webpage Content:\n```\n{{CONTENT}}\n```\n\nBased on the following analysis of the webpage content:\n\n```\n{{ANALYSIS}}\n```\n\nRewrite and improve the content to address the identified issues. Focus on:\n1. Enhancing clarity and readability\n2. Optimizing for SEO (suggest and incorporate relevant keywords)\n3. Improving calls-to-action for better conversion rates\n4. Refining the structure and organization\n5. Maintaining brand consistency while improving the overall tone\n\nProvide the improved content in HTML format inside a code-block with \"```\" backticks, preserving the original structure where appropriate. Also, include a brief summary of the changes made and their potential impact.",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 488.37278423303917,
"y": 0
}
},
"input_links": [
{
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "response",
"sink_name": "prompt_values_#_ANALYSIS",
"is_static": false
}
],
"output_links": [
{
"id": "d4334477-3616-454f-a430-614ca27f5b36",
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"source_name": "response",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
},
{
"id": "08612ce2-625b-4c17-accd-3acace7b6477",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "gpt-4o",
"prompt": "Analyze the following webpage content and provide a detailed report on its current state, including strengths and weaknesses in terms of clarity, SEO optimization, and potential for conversion:\n\n{{CONTENT}}\n\nInclude observations on:\n1. Overall readability and clarity\n2. Use of keywords and SEO-friendly language\n3. Effectiveness of calls-to-action\n4. Structure and organization of content\n5. Tone and brand consistency",
"prompt_values": {}
},
"metadata": {
"position": {
"x": -72.66206703605442,
"y": -0.58403945075381
}
},
"input_links": [
{
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
}
],
"output_links": [
{
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "response",
"sink_name": "prompt_values_#_ANALYSIS",
"is_static": false
}
],
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
"graph_version": 12,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "d4334477-3616-454f-a430-614ca27f5b36",
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"source_name": "content",
"sink_name": "prompt_values_#_CONTENT",
"is_static": false
},
{
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
"source_name": "response",
"sink_name": "value",
"is_static": false
},
{
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
"source_name": "response",
"sink_name": "prompt_values_#_ANALYSIS",
"is_static": false
},
{
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
"source_name": "result",
"sink_name": "url",
"is_static": true
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2024-12-20T19:47:22.036Z",
"input_schema": {
"type": "object",
"properties": {
"Homepage URL": {
"advanced": false,
"secret": false,
"title": "Homepage URL",
"description": "Enter the URL of the homepage you want to improve",
"default": "https://agpt.co"
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Improved Webpage Copy": {
"advanced": false,
"secret": false,
"title": "Improved Webpage Copy"
},
"Original Page Analysis": {
"advanced": false,
"secret": false,
"title": "Original Page Analysis",
"description": "Analysis of the webpage as it currently stands."
}
},
"required": [
"Improved Webpage Copy",
"Original Page Analysis"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"jina_api_key_credentials": {
"credentials_provider": [
"jina"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "jina",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"openai_api_key_credentials": {
"credentials_provider": [
"openai"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "openai",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"gpt-4o"
]
}
},
"required": [
"jina_api_key_credentials",
"openai_api_key_credentials"
],
"title": "AIWebpageCopyImproverCredentialsInputSchema",
"type": "object"
}
}
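Reviewer note: each `credentials_input_schema` repeats the full model-to-provider `discriminator_mapping`, while `discriminator_values` lists only the models this particular graph uses (here `gpt-4o`), which is what appears to pin down the providers whose credentials are actually required. A small sketch of that resolution, assuming the schema shape shown above (the helper name and demo dict are illustrative).

```python
def required_providers(credentials_schema: dict) -> set[str]:
    """Collect the providers implied by each credential field's discriminators."""
    providers = set()
    for field in credentials_schema["properties"].values():
        mapping = field.get("discriminator_mapping", {})
        values = field.get("discriminator_values", [])
        for model in values:
            providers.add(mapping.get(model, field["credentials_provider"][0]))
        if not values:  # no model discriminator: the field's provider is fixed
            providers.update(field["credentials_provider"])
    return providers

demo = {
    "properties": {
        "jina_api_key_credentials": {
            "credentials_provider": ["jina"],
            "discriminator_values": [],
        },
        "openai_api_key_credentials": {
            "credentials_provider": ["openai"],
            "discriminator_mapping": {"gpt-4o": "openai"},
            "discriminator_values": ["gpt-4o"],
        },
    }
}
print(required_providers(demo))  # {'jina', 'openai'}
```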


@@ -1,615 +0,0 @@
{
"id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"version": 29,
"is_active": true,
"name": "Email Address Finder",
"description": "Input information of a business and find their email address",
"instructions": null,
"recommended_schedule_cron": null,
"nodes": [
{
"id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Address",
"value": "USA"
},
"metadata": {
"position": {
"x": 1047.9357219838776,
"y": 1067.9123910370954
}
},
"input_links": [],
"output_links": [
{
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_ADDRESS",
"is_static": true
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"block_id": "3146e4fe-2cdd-4f29-bd12-0c9d5bb4deb0",
"input_default": {
"group": 1,
"pattern": "<email>(.*?)<\\/email>"
},
"metadata": {
"position": {
"x": 3381.2821481740634,
"y": 246.091098184158
}
},
"input_links": [
{
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"source_name": "response",
"sink_name": "text",
"is_static": false
}
],
"output_links": [
{
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"source_name": "negative",
"sink_name": "values_#_Result",
"is_static": false
},
{
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "positive",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
"input_default": {
"name": "Email"
},
"metadata": {
"position": {
"x": 4525.4246310882,
"y": 246.36913665010354
}
},
"input_links": [
{
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "output",
"sink_name": "value",
"is_static": false
},
{
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "positive",
"sink_name": "value",
"is_static": false
}
],
"output_links": [],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"block_id": "87840993-2053-44b7-8da4-187ad4ee518c",
"input_default": {},
"metadata": {
"position": {
"x": 2182.7499999999995,
"y": 242.00001144409185
}
},
"input_links": [
{
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"source_name": "output",
"sink_name": "query",
"is_static": false
}
],
"output_links": [
{
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "results",
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"input_default": {
"name": "Business Name",
"value": "Tim Cook"
},
"metadata": {
"position": {
"x": 1049.9704155272595,
"y": 244.49931152418344
}
},
"input_links": [],
"output_links": [
{
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_NAME",
"is_static": true
},
{
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "result",
"sink_name": "prompt_values_#_BUSINESS_NAME",
"is_static": true
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
"input_default": {
"format": "Email Address of {{NAME}}, {{ADDRESS}}",
"values": {}
},
"metadata": {
"position": {
"x": 1625.25,
"y": 243.25001144409185
}
},
"input_links": [
{
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_NAME",
"is_static": true
},
{
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_ADDRESS",
"is_static": true
}
],
"output_links": [
{
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"source_name": "output",
"sink_name": "query",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
"input_default": {
"format": "Failed to find email. \nResult:\n{{RESULT}}",
"values": {}
},
"metadata": {
"position": {
"x": 3949.7493830805934,
"y": 705.209819698647
}
},
"input_links": [
{
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"source_name": "negative",
"sink_name": "values_#_Result",
"is_static": false
}
],
"output_links": [
{
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "output",
"sink_name": "value",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
},
{
"id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
"input_default": {
"model": "claude-sonnet-4-5-20250929",
"prompt": "<business_website>\n{{WEBSITE_CONTENT}}\n</business_website>\n\nExtract the Contact Email of {{BUSINESS_NAME}}.\n\nIf no email that can be used to contact {{BUSINESS_NAME}} is present, output `N/A`.\nDo not share any emails other than the email for this specific entity.\n\nIf multiple present pick the likely best one.\n\nRespond with the email (or N/A) inside <email></email> tags.\n\nExample Response:\n\n<thoughts_or_comments>\nThere were many emails present, but luckily one was for {{BUSINESS_NAME}} which I have included below.\n</thoughts_or_comments>\n<email>\nexample@email.com\n</email>",
"prompt_values": {}
},
"metadata": {
"position": {
"x": 2774.879259081777,
"y": 243.3102035752969
}
},
"input_links": [
{
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "result",
"sink_name": "prompt_values_#_BUSINESS_NAME",
"is_static": true
},
{
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "results",
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
"is_static": false
}
],
"output_links": [
{
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"source_name": "response",
"sink_name": "text",
"is_static": false
}
],
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
"graph_version": 29,
"webhook_id": null,
"webhook": null
}
],
"links": [
{
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"source_name": "response",
"sink_name": "text",
"is_static": false
},
{
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"source_name": "negative",
"sink_name": "values_#_Result",
"is_static": false
},
{
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "output",
"sink_name": "value",
"is_static": false
},
{
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_NAME",
"is_static": true
},
{
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
"source_name": "positive",
"sink_name": "value",
"is_static": false
},
{
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "result",
"sink_name": "prompt_values_#_BUSINESS_NAME",
"is_static": true
},
{
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"source_name": "output",
"sink_name": "query",
"is_static": false
},
{
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
"source_name": "result",
"sink_name": "values_#_ADDRESS",
"is_static": true
},
{
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
"source_name": "results",
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
"is_static": false
}
],
"forked_from_id": null,
"forked_from_version": null,
"sub_graphs": [],
"user_id": "",
"created_at": "2025-01-03T00:46:30.244Z",
"input_schema": {
"type": "object",
"properties": {
"Address": {
"advanced": false,
"secret": false,
"title": "Address",
"default": "USA"
},
"Business Name": {
"advanced": false,
"secret": false,
"title": "Business Name",
"default": "Tim Cook"
}
},
"required": []
},
"output_schema": {
"type": "object",
"properties": {
"Email": {
"advanced": false,
"secret": false,
"title": "Email"
}
},
"required": [
"Email"
]
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"trigger_setup_info": null,
"credentials_input_schema": {
"properties": {
"jina_api_key_credentials": {
"credentials_provider": [
"jina"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "jina",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
"type": "object",
"discriminator_values": []
},
"anthropic_api_key_credentials": {
"credentials_provider": [
"anthropic"
],
"credentials_types": [
"api_key"
],
"properties": {
"id": {
"title": "Id",
"type": "string"
},
"title": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Title"
},
"provider": {
"const": "anthropic",
"title": "Provider",
"type": "string"
},
"type": {
"const": "api_key",
"title": "Type",
"type": "string"
}
},
"required": [
"id",
"provider",
"type"
],
"title": "CredentialsMetaInput[Literal[<ProviderName.ANTHROPIC: 'anthropic'>], Literal['api_key']]",
"type": "object",
"discriminator": "model",
"discriminator_mapping": {
"Llama-3.3-70B-Instruct": "llama_api",
"Llama-3.3-8B-Instruct": "llama_api",
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
"amazon/nova-lite-v1": "open_router",
"amazon/nova-micro-v1": "open_router",
"amazon/nova-pro-v1": "open_router",
"claude-3-7-sonnet-20250219": "anthropic",
"claude-3-haiku-20240307": "anthropic",
"claude-haiku-4-5-20251001": "anthropic",
"claude-opus-4-1-20250805": "anthropic",
"claude-opus-4-20250514": "anthropic",
"claude-opus-4-5-20251101": "anthropic",
"claude-sonnet-4-20250514": "anthropic",
"claude-sonnet-4-5-20250929": "anthropic",
"cohere/command-r-08-2024": "open_router",
"cohere/command-r-plus-08-2024": "open_router",
"deepseek/deepseek-chat": "open_router",
"deepseek/deepseek-r1-0528": "open_router",
"dolphin-mistral:latest": "ollama",
"google/gemini-2.0-flash-001": "open_router",
"google/gemini-2.0-flash-lite-001": "open_router",
"google/gemini-2.5-flash": "open_router",
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
"google/gemini-2.5-pro-preview-03-25": "open_router",
"google/gemini-3-pro-preview": "open_router",
"gpt-3.5-turbo": "openai",
"gpt-4-turbo": "openai",
"gpt-4.1-2025-04-14": "openai",
"gpt-4.1-mini-2025-04-14": "openai",
"gpt-4o": "openai",
"gpt-4o-mini": "openai",
"gpt-5-2025-08-07": "openai",
"gpt-5-chat-latest": "openai",
"gpt-5-mini-2025-08-07": "openai",
"gpt-5-nano-2025-08-07": "openai",
"gpt-5.1-2025-11-13": "openai",
"gryphe/mythomax-l2-13b": "open_router",
"llama-3.1-8b-instant": "groq",
"llama-3.3-70b-versatile": "groq",
"llama3": "ollama",
"llama3.1:405b": "ollama",
"llama3.2": "ollama",
"llama3.3": "ollama",
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
"meta-llama/llama-4-maverick": "open_router",
"meta-llama/llama-4-scout": "open_router",
"microsoft/wizardlm-2-8x22b": "open_router",
"mistralai/mistral-nemo": "open_router",
"moonshotai/kimi-k2": "open_router",
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
"o1": "openai",
"o1-mini": "openai",
"o3-2025-04-16": "openai",
"o3-mini": "openai",
"openai/gpt-oss-120b": "open_router",
"openai/gpt-oss-20b": "open_router",
"perplexity/sonar": "open_router",
"perplexity/sonar-deep-research": "open_router",
"perplexity/sonar-pro": "open_router",
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
"qwen/qwen3-coder": "open_router",
"v0-1.0-md": "v0",
"v0-1.5-lg": "v0",
"v0-1.5-md": "v0",
"x-ai/grok-4": "open_router",
"x-ai/grok-4-fast": "open_router",
"x-ai/grok-4.1-fast": "open_router",
"x-ai/grok-code-fast-1": "open_router"
},
"discriminator_values": [
"claude-sonnet-4-5-20250929"
]
}
},
"required": [
"jina_api_key_credentials",
"anthropic_api_key_credentials"
],
"title": "EmailAddressFinderCredentialsInputSchema",
"type": "object"
}
}
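
The link entries in this graph use a `_#_` separator in sink names (for example `prompt_values_#_BUSINESS_NAME`) to address a single key inside a dictionary-typed input. A minimal Python sketch of that routing convention, inferred from the JSON above rather than taken from the platform's executor:

from typing import Any

def apply_link_output(inputs: dict[str, Any], sink_name: str, value: Any) -> None:
    # Sink names like "prompt_values_#_BUSINESS_NAME" write into the
    # "BUSINESS_NAME" key of the dict-typed "prompt_values" input.
    # (Sketch only; the "_#_" convention is inferred from the links above.)
    if "_#_" in sink_name:
        field, key = sink_name.split("_#_", 1)
        inputs.setdefault(field, {})[key] = value
    else:
        inputs[sink_name] = value

# e.g. the link feeding values_#_NAME lands as {"values": {"NAME": ...}}
node_inputs: dict[str, Any] = {}
apply_link_output(node_inputs, "values_#_NAME", "Tim Cook")
assert node_inputs == {"values": {"NAME": "Tim Cook"}}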

View File

@@ -11,7 +11,7 @@ from backend.data.block import (
BlockType,
get_block,
)
from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
from backend.data.execution import ExecutionStatus, NodesInputMasks
from backend.data.model import NodeExecutionStats, SchemaField
from backend.util.json import validate_with_jsonschema
from backend.util.retry import func_retry
@@ -72,9 +72,9 @@ class AgentExecutorBlock(Block):
input_data: Input,
*,
graph_exec_id: str,
execution_context: ExecutionContext,
**kwargs,
) -> BlockOutput:
from backend.executor import utils as execution_utils
graph_exec = await execution_utils.add_graph_execution(
@@ -83,9 +83,8 @@ class AgentExecutorBlock(Block):
user_id=input_data.user_id,
inputs=input_data.inputs,
nodes_input_masks=input_data.nodes_input_masks,
execution_context=execution_context.model_copy(
update={"parent_execution_id": graph_exec_id},
),
parent_graph_exec_id=graph_exec_id,
is_sub_graph=True, # AgentExecutorBlock executions are always sub-graphs
)
logger = execution_utils.LogMetadata(

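The hunk above trades a copied ExecutionContext for explicit keyword arguments, so the sub-graph relationship is declared at the call site instead of being carried through a context copy. A minimal sketch of the new call shape, with the dependencies passed in as parameters since the surrounding block code is not fully shown:

async def start_sub_graph(execution_utils, input_data, graph_exec_id: str):
    # Call shape per the hunk above; field names on input_data are assumed
    # to match the block's Input schema, which is not shown here.
    return await execution_utils.add_graph_execution(
        user_id=input_data.user_id,
        inputs=input_data.inputs,
        nodes_input_masks=input_data.nodes_input_masks,
        parent_graph_exec_id=graph_exec_id,  # replaces the ExecutionContext copy
        is_sub_graph=True,  # AgentExecutorBlock executions are always sub-graphs
    )
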
View File

@@ -1,4 +1,3 @@
import asyncio
from enum import Enum
from typing import Literal
@@ -20,7 +19,7 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.file import MediaFileType, store_media_file
from backend.util.file import MediaFileType
class GeminiImageModel(str, Enum):
@@ -28,20 +27,6 @@ class GeminiImageModel(str, Enum):
NANO_BANANA_PRO = "google/nano-banana-pro"
class AspectRatio(str, Enum):
MATCH_INPUT_IMAGE = "match_input_image"
ASPECT_1_1 = "1:1"
ASPECT_2_3 = "2:3"
ASPECT_3_2 = "3:2"
ASPECT_3_4 = "3:4"
ASPECT_4_3 = "4:3"
ASPECT_4_5 = "4:5"
ASPECT_5_4 = "5:4"
ASPECT_9_16 = "9:16"
ASPECT_16_9 = "16:9"
ASPECT_21_9 = "21:9"
class OutputFormat(str, Enum):
JPG = "jpg"
PNG = "png"
@@ -84,11 +69,6 @@ class AIImageCustomizerBlock(Block):
default=[],
title="Input Images",
)
aspect_ratio: AspectRatio = SchemaField(
description="Aspect ratio of the generated image",
default=AspectRatio.MATCH_INPUT_IMAGE,
title="Aspect Ratio",
)
output_format: OutputFormat = SchemaField(
description="Format of the output image",
default=OutputFormat.PNG,
@@ -112,7 +92,6 @@ class AIImageCustomizerBlock(Block):
"prompt": "Make the scene more vibrant and colorful",
"model": GeminiImageModel.NANO_BANANA,
"images": [],
"aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
"output_format": OutputFormat.JPG,
"credentials": TEST_CREDENTIALS_INPUT,
},
@@ -137,25 +116,11 @@ class AIImageCustomizerBlock(Block):
**kwargs,
) -> BlockOutput:
try:
# Convert local file paths to Data URIs (base64) so Replicate can access them
processed_images = await asyncio.gather(
*(
store_media_file(
graph_exec_id=graph_exec_id,
file=img,
user_id=user_id,
return_content=True,
)
for img in input_data.images
)
)
result = await self.run_model(
api_key=credentials.api_key,
model_name=input_data.model.value,
prompt=input_data.prompt,
images=processed_images,
aspect_ratio=input_data.aspect_ratio.value,
images=input_data.images,
output_format=input_data.output_format.value,
)
yield "image_url", result
@@ -168,14 +133,12 @@ class AIImageCustomizerBlock(Block):
model_name: str,
prompt: str,
images: list[MediaFileType],
aspect_ratio: str,
output_format: str,
) -> MediaFileType:
client = ReplicateClient(api_token=api_key.get_secret_value())
input_params: dict = {
"prompt": prompt,
"aspect_ratio": aspect_ratio,
"output_format": output_format,
}
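
The removed branch inlined locally stored images as base64 Data URIs (store_media_file with return_content=True) so Replicate could read them without access to the platform's storage. A self-contained, hypothetical stand-in for that step, not the platform's store_media_file:

import base64
import mimetypes
from pathlib import Path

def to_data_uri(path: str) -> str:
    # Hypothetical helper illustrating the removed conversion step: inline a
    # local file as a base64 Data URI that a remote API can consume directly.
    mime, _ = mimetypes.guess_type(path)
    payload = base64.b64encode(Path(path).read_bytes()).decode("ascii")
    return f"data:{mime or 'application/octet-stream'};base64,{payload}"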

View File

@@ -1,9 +1,8 @@
import base64
import io
import mimetypes
from enum import Enum
from pathlib import Path
from typing import Any, Literal, cast
from typing import Any
import discord
from pydantic import SecretStr
@@ -34,19 +33,6 @@ TEST_CREDENTIALS = TEST_BOT_CREDENTIALS
TEST_CREDENTIALS_INPUT = TEST_BOT_CREDENTIALS_INPUT
class ThreadArchiveDuration(str, Enum):
"""Discord thread auto-archive duration options"""
ONE_HOUR = "60"
ONE_DAY = "1440"
THREE_DAYS = "4320"
ONE_WEEK = "10080"
def to_minutes(self) -> int:
"""Convert the duration string to minutes for Discord API"""
return int(self.value)
class ReadDiscordMessagesBlock(Block):
class Input(BlockSchemaInput):
credentials: DiscordCredentials = DiscordCredentialsField()
@@ -1180,211 +1166,3 @@ class DiscordChannelInfoBlock(Block):
raise ValueError(f"Login error occurred: {login_err}")
except Exception as e:
raise ValueError(f"An error occurred: {e}")
class CreateDiscordThreadBlock(Block):
class Input(BlockSchemaInput):
credentials: DiscordCredentials = DiscordCredentialsField()
channel_name: str = SchemaField(
description="Channel ID or channel name to create the thread in"
)
server_name: str = SchemaField(
description="Server name (only needed if using channel name)",
advanced=True,
default="",
)
thread_name: str = SchemaField(description="The name of the thread to create")
is_private: bool = SchemaField(
description="Whether to create a private thread (requires Boost Level 2+) or public thread",
default=False,
)
auto_archive_duration: ThreadArchiveDuration = SchemaField(
description="Duration before the thread is automatically archived",
advanced=True,
default=ThreadArchiveDuration.ONE_WEEK,
)
message_content: str = SchemaField(
description="Optional initial message to send in the thread",
advanced=True,
default="",
)
class Output(BlockSchemaOutput):
status: str = SchemaField(description="Operation status")
thread_id: str = SchemaField(description="ID of the created thread")
thread_name: str = SchemaField(description="Name of the created thread")
def __init__(self):
super().__init__(
id="e8f3c9a2-7b5d-4f1e-9c6a-3d8e2b4f7a1c",
input_schema=CreateDiscordThreadBlock.Input,
output_schema=CreateDiscordThreadBlock.Output,
description="Creates a new thread in a Discord channel.",
categories={BlockCategory.SOCIAL},
test_input={
"channel_name": "general",
"thread_name": "Test Thread",
"is_private": False,
"auto_archive_duration": ThreadArchiveDuration.ONE_HOUR,
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[
("status", "Thread created successfully"),
("thread_id", "123456789012345678"),
("thread_name", "Test Thread"),
],
test_mock={
"create_thread": lambda *args, **kwargs: {
"status": "Thread created successfully",
"thread_id": "123456789012345678",
"thread_name": "Test Thread",
}
},
test_credentials=TEST_CREDENTIALS,
)
async def create_thread(
self,
token: str,
channel_name: str,
server_name: str | None,
thread_name: str,
is_private: bool,
auto_archive_duration: ThreadArchiveDuration,
message_content: str,
) -> dict:
intents = discord.Intents.default()
intents.guilds = True
intents.message_content = True # Required for sending messages in threads
client = discord.Client(intents=intents)
result = {}
@client.event
async def on_ready():
channel = None
# Try to parse as channel ID first
try:
channel_id = int(channel_name)
try:
channel = await client.fetch_channel(channel_id)
except discord.errors.NotFound:
result["status"] = f"Channel with ID {channel_id} not found"
await client.close()
return
except discord.errors.Forbidden:
result["status"] = (
f"Bot does not have permission to view channel {channel_id}"
)
await client.close()
return
except ValueError:
# Not an ID, treat as channel name
# Collect all matching channels to detect duplicates
matching_channels = []
for guild in client.guilds:
# Skip guilds if server_name is provided and doesn't match
if (
server_name
and server_name.strip()
and guild.name != server_name
):
continue
for ch in guild.text_channels:
if ch.name == channel_name:
matching_channels.append(ch)
if not matching_channels:
result["status"] = f"Channel not found: {channel_name}"
await client.close()
return
elif len(matching_channels) > 1:
result["status"] = (
f"Multiple channels named '{channel_name}' found. "
"Please specify server_name to disambiguate."
)
await client.close()
return
else:
channel = matching_channels[0]
if not channel:
result["status"] = "Failed to resolve channel"
await client.close()
return
# Type check - ensure it's a text channel that can create threads
if not hasattr(channel, "create_thread"):
result["status"] = (
f"Channel {channel_name} cannot create threads (not a text channel)"
)
await client.close()
return
# After the hasattr check, we know channel is a TextChannel
channel = cast(discord.TextChannel, channel)
try:
# Create the thread using discord.py 2.0+ API
thread_type = (
discord.ChannelType.private_thread
if is_private
else discord.ChannelType.public_thread
)
# Cast to the specific Literal type that discord.py expects
duration_minutes = cast(
Literal[60, 1440, 4320, 10080], auto_archive_duration.to_minutes()
)
# The 'type' parameter exists in discord.py 2.0+ but isn't in type stubs yet
# pyright: ignore[reportCallIssue]
thread = await channel.create_thread(
name=thread_name,
type=thread_type,
auto_archive_duration=duration_minutes,
)
# Send initial message if provided
if message_content:
await thread.send(message_content)
result["status"] = "Thread created successfully"
result["thread_id"] = str(thread.id)
result["thread_name"] = thread.name
except discord.errors.Forbidden as e:
result["status"] = (
f"Bot does not have permission to create threads in this channel. {str(e)}"
)
except Exception as e:
result["status"] = f"Error creating thread: {str(e)}"
finally:
await client.close()
await client.start(token)
return result
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self.create_thread(
token=credentials.api_key.get_secret_value(),
channel_name=input_data.channel_name,
server_name=input_data.server_name or None,
thread_name=input_data.thread_name,
is_private=input_data.is_private,
auto_archive_duration=input_data.auto_archive_duration,
message_content=input_data.message_content,
)
yield "status", result.get("status", "Unknown error")
if "thread_id" in result:
yield "thread_id", result["thread_id"]
if "thread_name" in result:
yield "thread_name", result["thread_name"]
except discord.errors.LoginFailure as login_err:
raise ValueError(f"Login error occurred: {login_err}")
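
The removed CreateDiscordThreadBlock resolves a channel reference in two passes: parse it as a numeric ID first, and only then fall back to a name search across guilds, refusing ambiguous names unless server_name narrows the match. A simplified, self-contained restatement of that name lookup (guild.name and guild.text_channels follow discord.py's shapes, as in the removed code):

def resolve_channel_by_name(guilds, channel_name: str, server_name: str = ""):
    # Collect every matching text channel, optionally scoped to one server,
    # and refuse ambiguity -- mirroring the removed duplicate detection.
    matches = [
        ch
        for guild in guilds
        if not server_name.strip() or guild.name == server_name
        for ch in guild.text_channels
        if ch.name == channel_name
    ]
    if not matches:
        raise LookupError(f"Channel not found: {channel_name}")
    if len(matches) > 1:
        raise LookupError(
            f"Multiple channels named {channel_name!r}; pass server_name to disambiguate."
        )
    return matches[0]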

View File

@@ -1,155 +0,0 @@
from typing import Any, Literal, Optional
from pydantic import BaseModel, ConfigDict, Field
from backend.data.model import SchemaField
AttachmentView = Literal[
"DOCS",
"DOCUMENTS",
"SPREADSHEETS",
"PRESENTATIONS",
"DOCS_IMAGES",
"FOLDERS",
]
ATTACHMENT_VIEWS: tuple[AttachmentView, ...] = (
"DOCS",
"DOCUMENTS",
"SPREADSHEETS",
"PRESENTATIONS",
"DOCS_IMAGES",
"FOLDERS",
)
class _GoogleDriveFileBase(BaseModel):
"""Internal base class for Google Drive file representation."""
model_config = ConfigDict(populate_by_name=True)
id: str = Field(description="Google Drive file/folder ID")
name: Optional[str] = Field(None, description="File/folder name")
mime_type: Optional[str] = Field(
None,
alias="mimeType",
description="MIME type (e.g., application/vnd.google-apps.document)",
)
url: Optional[str] = Field(None, description="URL to open the file")
icon_url: Optional[str] = Field(None, alias="iconUrl", description="Icon URL")
is_folder: Optional[bool] = Field(
None, alias="isFolder", description="Whether this is a folder"
)
class GoogleDriveFile(_GoogleDriveFileBase):
"""
Represents a Google Drive file/folder with optional credentials for chaining.
Used for both inputs and outputs in Google Drive blocks. The `_credentials_id`
field enables chaining between blocks - when one block outputs a file, the
next block can use the same credentials to access it.
When used with GoogleDriveFileField(), the frontend renders a combined
auth + file picker UI that automatically populates `_credentials_id`.
"""
# Hidden field for credential ID - populated by frontend, preserved in outputs
credentials_id: Optional[str] = Field(
None,
alias="_credentials_id",
description="Internal: credential ID for authentication",
)
def GoogleDriveFileField(
*,
title: str,
description: str | None = None,
credentials_kwarg: str = "credentials",
credentials_scopes: list[str] | None = None,
allowed_views: list[AttachmentView] | None = None,
allowed_mime_types: list[str] | None = None,
placeholder: str | None = None,
**kwargs: Any,
) -> Any:
"""
Creates a Google Drive file input field with auto-generated credentials.
This field type produces a single UI element that handles both:
1. Google OAuth authentication
2. File selection via Google Drive Picker
The system automatically generates a credentials field, and the credentials
are passed to the run() method using the specified kwarg name.
Args:
title: Field title shown in UI
description: Field description/help text
credentials_kwarg: Name of the kwarg that will receive GoogleCredentials
in the run() method (default: "credentials")
credentials_scopes: OAuth scopes required (default: drive.file)
allowed_views: List of view types to show in picker (default: ["DOCS"])
allowed_mime_types: Filter by MIME types
placeholder: Placeholder text for the button
**kwargs: Additional SchemaField arguments
Returns:
Field definition that produces GoogleDriveFile
Example:
>>> class MyBlock(Block):
... class Input(BlockSchemaInput):
... spreadsheet: GoogleDriveFile = GoogleDriveFileField(
... title="Select Spreadsheet",
... credentials_kwarg="creds",
... allowed_views=["SPREADSHEETS"],
... )
...
... async def run(
... self, input_data: Input, *, creds: GoogleCredentials, **kwargs
... ):
... # creds is automatically populated
... file = input_data.spreadsheet
"""
# Determine scopes - drive.file is sufficient for picker-selected files
scopes = credentials_scopes or ["https://www.googleapis.com/auth/drive.file"]
# Build picker configuration with auto_credentials embedded
picker_config = {
"multiselect": False,
"allow_folder_selection": False,
"allowed_views": list(allowed_views) if allowed_views else ["DOCS"],
"scopes": scopes,
# Auto-credentials config tells frontend to include _credentials_id in output
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": scopes,
"kwarg_name": credentials_kwarg,
},
}
if allowed_mime_types:
picker_config["allowed_mime_types"] = list(allowed_mime_types)
return SchemaField(
default=None,
title=title,
description=description,
placeholder=placeholder or "Select from Google Drive",
# Use google-drive-picker format so frontend renders existing component
format="google-drive-picker",
advanced=False,
json_schema_extra={
"google_drive_picker_config": picker_config,
# Also keep auto_credentials at top level for backend detection
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": scopes,
"kwarg_name": credentials_kwarg,
},
**kwargs,
},
)

File diff suppressed because it is too large

View File

@@ -184,13 +184,7 @@ class SendWebRequestBlock(Block):
)
# ─── Execute request ─────────────────────────────────────────
# Use raise_for_status=False so HTTP errors (4xx, 5xx) are returned
# as response objects instead of raising exceptions, allowing proper
# handling via client_error and server_error outputs
response = await Requests(
raise_for_status=False,
retry_max_attempts=1, # allow callers to handle HTTP errors immediately
).request(
response = await Requests().request(
input_data.method.value,
input_data.url,
headers=input_data.headers,

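The removed comment explains the intent of raise_for_status=False: 4xx/5xx responses come back as data and are routed to the block's client_error and server_error outputs rather than raised as exceptions. A minimal sketch of that routing, using the output names from the removed comment:

def route_response(status: int, body: object) -> tuple[str, object]:
    # Map an HTTP status to the block output it should be emitted on.
    if 400 <= status < 500:
        return ("client_error", body)
    if status >= 500:
        return ("server_error", body)
    return ("response", body)

assert route_response(404, {"detail": "missing"})[0] == "client_error"
assert route_response(200, {"ok": True})[0] == "response"
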
View File

@@ -1,169 +0,0 @@
import logging
from typing import Any, Literal
from prisma.enums import ReviewStatus
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
BlockType,
)
from backend.data.execution import ExecutionContext, ExecutionStatus
from backend.data.human_review import ReviewResult
from backend.data.model import SchemaField
from backend.executor.manager import async_update_node_execution_status
from backend.util.clients import get_database_manager_async_client
logger = logging.getLogger(__name__)
class HumanInTheLoopBlock(Block):
"""
This block pauses execution and waits for human approval or modification of the data.
When executed, it creates a pending review entry and sets the node execution status
to REVIEW. The execution will remain paused until a human user either:
- Approves the data (with or without modifications)
- Rejects the data
This is useful for workflows that require human validation or intervention before
proceeding to the next steps.
"""
class Input(BlockSchemaInput):
data: Any = SchemaField(description="The data to be reviewed by a human user")
name: str = SchemaField(
description="A descriptive name for what this data represents",
)
editable: bool = SchemaField(
description="Whether the human reviewer can edit the data",
default=True,
advanced=True,
)
class Output(BlockSchemaOutput):
reviewed_data: Any = SchemaField(
description="The data after human review (may be modified)"
)
status: Literal["approved", "rejected"] = SchemaField(
description="Status of the review: 'approved' or 'rejected'"
)
review_message: str = SchemaField(
description="Any message provided by the reviewer", default=""
)
def __init__(self):
super().__init__(
id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
description="Pause execution and wait for human approval or modification of data",
categories={BlockCategory.BASIC},
input_schema=HumanInTheLoopBlock.Input,
output_schema=HumanInTheLoopBlock.Output,
block_type=BlockType.HUMAN_IN_THE_LOOP,
test_input={
"data": {"name": "John Doe", "age": 30},
"name": "User profile data",
"editable": True,
},
test_output=[
("status", "approved"),
("reviewed_data", {"name": "John Doe", "age": 30}),
],
test_mock={
"get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
data={"name": "John Doe", "age": 30},
status=ReviewStatus.APPROVED,
message="",
processed=False,
node_exec_id="test-node-exec-id",
),
"update_node_execution_status": lambda *_args, **_kwargs: None,
"update_review_processed_status": lambda *_args, **_kwargs: None,
},
)
async def get_or_create_human_review(self, **kwargs):
return await get_database_manager_async_client().get_or_create_human_review(
**kwargs
)
async def update_node_execution_status(self, **kwargs):
return await async_update_node_execution_status(
db_client=get_database_manager_async_client(), **kwargs
)
async def update_review_processed_status(self, node_exec_id: str, processed: bool):
return await get_database_manager_async_client().update_review_processed_status(
node_exec_id, processed
)
async def run(
self,
input_data: Input,
*,
user_id: str,
node_exec_id: str,
graph_exec_id: str,
graph_id: str,
graph_version: int,
execution_context: ExecutionContext,
**kwargs,
) -> BlockOutput:
if not execution_context.safe_mode:
logger.info(
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
)
yield "status", "approved"
yield "reviewed_data", input_data.data
yield "review_message", "Auto-approved (safe mode disabled)"
return
try:
result = await self.get_or_create_human_review(
user_id=user_id,
node_exec_id=node_exec_id,
graph_exec_id=graph_exec_id,
graph_id=graph_id,
graph_version=graph_version,
input_data=input_data.data,
message=input_data.name,
editable=input_data.editable,
)
except Exception as e:
logger.error(f"Error in HITL block for node {node_exec_id}: {str(e)}")
raise
if result is None:
logger.info(
f"HITL block pausing execution for node {node_exec_id} - awaiting human review"
)
try:
await self.update_node_execution_status(
exec_id=node_exec_id,
status=ExecutionStatus.REVIEW,
)
return
except Exception as e:
logger.error(
f"Failed to update node status for HITL block {node_exec_id}: {str(e)}"
)
raise
if not result.processed:
await self.update_review_processed_status(
node_exec_id=node_exec_id, processed=True
)
if result.status == ReviewStatus.APPROVED:
yield "status", "approved"
yield "reviewed_data", result.data
if result.message:
yield "review_message", result.message
elif result.status == ReviewStatus.REJECTED:
yield "status", "rejected"
if result.message:
yield "review_message", result.message

View File

@@ -2,8 +2,6 @@ import copy
from datetime import date, time
from typing import Any, Optional
# Import for Google Drive file input block
from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
from backend.data.block import (
Block,
BlockCategory,
@@ -648,119 +646,6 @@ class AgentTableInputBlock(AgentInputBlock):
yield "result", input_data.value if input_data.value is not None else []
class AgentGoogleDriveFileInputBlock(AgentInputBlock):
"""
This block allows users to select a file from Google Drive.
It provides a Google Drive file picker UI that handles both authentication
and file selection. The selected file information (ID, name, URL, etc.)
is output for use by other blocks like Google Sheets Read.
"""
class Input(AgentInputBlock.Input):
value: Optional[GoogleDriveFile] = SchemaField(
description="The selected Google Drive file.",
default=None,
advanced=False,
title="Selected File",
)
allowed_views: list[AttachmentView] = SchemaField(
description="Which views to show in the file picker (DOCS, SPREADSHEETS, PRESENTATIONS, etc.).",
default_factory=lambda: ["DOCS", "SPREADSHEETS", "PRESENTATIONS"],
advanced=False,
title="Allowed Views",
)
allow_folder_selection: bool = SchemaField(
description="Whether to allow selecting folders.",
default=False,
advanced=True,
title="Allow Folder Selection",
)
def generate_schema(self):
"""Generate schema for the value field with Google Drive picker format."""
schema = super().generate_schema()
# Default scopes for drive.file access
scopes = ["https://www.googleapis.com/auth/drive.file"]
# Build picker configuration
picker_config = {
"multiselect": False, # Single file selection only for now
"allow_folder_selection": self.allow_folder_selection,
"allowed_views": (
list(self.allowed_views) if self.allowed_views else ["DOCS"]
),
"scopes": scopes,
# Auto-credentials config tells frontend to include _credentials_id in output
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": scopes,
"kwarg_name": "credentials",
},
}
# Set format and config for frontend to render Google Drive picker
schema["format"] = "google-drive-picker"
schema["google_drive_picker_config"] = picker_config
# Also keep auto_credentials at top level for backend detection
schema["auto_credentials"] = {
"provider": "google",
"type": "oauth2",
"scopes": scopes,
"kwarg_name": "credentials",
}
if self.value is not None:
schema["default"] = self.value.model_dump()
return schema
class Output(AgentInputBlock.Output):
result: GoogleDriveFile = SchemaField(
description="The selected Google Drive file with ID, name, URL, and other metadata."
)
def __init__(self):
test_file = GoogleDriveFile.model_validate(
{
"id": "test-file-id",
"name": "Test Spreadsheet",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/test-file-id",
}
)
super().__init__(
id="d3b32f15-6fd7-40e3-be52-e083f51b19a2",
description="Block for selecting a file from Google Drive.",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentGoogleDriveFileInputBlock.Input,
output_schema=AgentGoogleDriveFileInputBlock.Output,
test_input=[
{
"name": "spreadsheet_input",
"description": "Select a spreadsheet from Google Drive",
"allowed_views": ["SPREADSHEETS"],
"value": {
"id": "test-file-id",
"name": "Test Spreadsheet",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/test-file-id",
},
}
],
test_output=[("result", test_file)],
)
async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
"""
Yields the selected Google Drive file.
"""
if input_data.value is not None:
yield "result", input_data.value
IO_BLOCK_IDs = [
AgentInputBlock().id,
AgentOutputBlock().id,
@@ -773,5 +658,4 @@ IO_BLOCK_IDs = [
AgentDropdownInputBlock().id,
AgentToggleInputBlock().id,
AgentTableInputBlock().id,
AgentGoogleDriveFileInputBlock().id,
]

View File

@@ -1,24 +1,14 @@
from typing import Any, Type
from typing import Type
import pytest
from backend.data.block import Block, BlockSchemaInput, get_blocks
from backend.data.model import SchemaField
from backend.data.block import Block, get_blocks
from backend.util.test import execute_block_test
SKIP_BLOCK_TESTS = {
"HumanInTheLoopBlock",
}
@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b().name)
async def test_available_blocks(block: Type[Block]):
block_instance = block()
if block_instance.__class__.__name__ in SKIP_BLOCK_TESTS:
pytest.skip(
f"Skipping {block_instance.__class__.__name__} - requires external service"
)
await execute_block_test(block_instance)
await execute_block_test(block())
@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b().name)
@@ -133,148 +123,3 @@ async def test_block_ids_valid(block: Type[Block]):
), f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
except ValueError:
pytest.fail(f"Block {block.name} has invalid UUID format: {block_instance.id}")
class TestAutoCredentialsFieldsValidation:
"""Tests for auto_credentials field validation in BlockSchema."""
def test_duplicate_auto_credentials_kwarg_name_raises_error(self):
"""Test that duplicate kwarg_name in auto_credentials raises ValueError."""
class DuplicateKwargSchema(BlockSchemaInput):
"""Schema with duplicate auto_credentials kwarg_name."""
# Both fields explicitly use the same kwarg_name "credentials"
file1: dict[str, Any] | None = SchemaField(
description="First file input",
default=None,
json_schema_extra={
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.file"],
"kwarg_name": "credentials",
}
},
)
file2: dict[str, Any] | None = SchemaField(
description="Second file input",
default=None,
json_schema_extra={
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.file"],
"kwarg_name": "credentials", # Duplicate kwarg_name!
}
},
)
with pytest.raises(ValueError) as exc_info:
DuplicateKwargSchema.get_auto_credentials_fields()
error_message = str(exc_info.value)
assert "Duplicate auto_credentials kwarg_name 'credentials'" in error_message
assert "file1" in error_message
assert "file2" in error_message
def test_unique_auto_credentials_kwarg_names_succeed(self):
"""Test that unique kwarg_name values work correctly."""
class UniqueKwargSchema(BlockSchemaInput):
"""Schema with unique auto_credentials kwarg_name values."""
file1: dict[str, Any] | None = SchemaField(
description="First file input",
default=None,
json_schema_extra={
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.file"],
"kwarg_name": "file1_credentials",
}
},
)
file2: dict[str, Any] | None = SchemaField(
description="Second file input",
default=None,
json_schema_extra={
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.file"],
"kwarg_name": "file2_credentials", # Different kwarg_name
}
},
)
# Should not raise
result = UniqueKwargSchema.get_auto_credentials_fields()
assert "file1_credentials" in result
assert "file2_credentials" in result
assert result["file1_credentials"]["field_name"] == "file1"
assert result["file2_credentials"]["field_name"] == "file2"
def test_default_kwarg_name_is_credentials(self):
"""Test that missing kwarg_name defaults to 'credentials'."""
class DefaultKwargSchema(BlockSchemaInput):
"""Schema with auto_credentials missing kwarg_name."""
file: dict[str, Any] | None = SchemaField(
description="File input",
default=None,
json_schema_extra={
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.file"],
# No kwarg_name specified - should default to "credentials"
}
},
)
result = DefaultKwargSchema.get_auto_credentials_fields()
assert "credentials" in result
assert result["credentials"]["field_name"] == "file"
def test_duplicate_default_kwarg_name_raises_error(self):
"""Test that two fields with default kwarg_name raises ValueError."""
class DefaultDuplicateSchema(BlockSchemaInput):
"""Schema where both fields omit kwarg_name, defaulting to 'credentials'."""
file1: dict[str, Any] | None = SchemaField(
description="First file input",
default=None,
json_schema_extra={
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.file"],
# No kwarg_name - defaults to "credentials"
}
},
)
file2: dict[str, Any] | None = SchemaField(
description="Second file input",
default=None,
json_schema_extra={
"auto_credentials": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.file"],
# No kwarg_name - also defaults to "credentials"
}
},
)
with pytest.raises(ValueError) as exc_info:
DefaultDuplicateSchema.get_auto_credentials_fields()
assert "Duplicate auto_credentials kwarg_name 'credentials'" in str(
exc_info.value
)

View File

@@ -14,7 +14,7 @@ from backend.data.block import (
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.execution import UserContext
from backend.data.model import SchemaField
# Shared timezone literal type for all time/date blocks
@@ -188,9 +188,10 @@ class GetCurrentTimeBlock(Block):
)
async def run(
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
self, input_data: Input, *, user_context: UserContext, **kwargs
) -> BlockOutput:
effective_timezone = execution_context.user_timezone
# Extract timezone from user_context (always present)
effective_timezone = user_context.timezone
# Get the appropriate timezone
tz = _get_timezone(input_data.format_type, effective_timezone)
@@ -297,10 +298,10 @@ class GetCurrentDateBlock(Block):
],
)
async def run(
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
) -> BlockOutput:
effective_timezone = execution_context.user_timezone
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# Extract timezone from user_context (required keyword argument)
user_context: UserContext = kwargs["user_context"]
effective_timezone = user_context.timezone
try:
offset = int(input_data.offset)
@@ -403,10 +404,10 @@ class GetCurrentDateAndTimeBlock(Block):
],
)
async def run(
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
) -> BlockOutput:
effective_timezone = execution_context.user_timezone
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# Extract timezone from user_context (required keyword argument)
user_context: UserContext = kwargs["user_context"]
effective_timezone = user_context.timezone
# Get the appropriate timezone
tz = _get_timezone(input_data.format_type, effective_timezone)

View File

@@ -1,4 +1,4 @@
from datetime import datetime, timedelta, timezone
from datetime import datetime
from typing import Any, Dict
from backend.blocks.twitter._mappers import (
@@ -237,12 +237,6 @@ class TweetDurationBuilder:
def add_start_time(self, start_time: datetime | None):
if start_time:
# Twitter API requires start_time to be at least 10 seconds before now
max_start_time = datetime.now(timezone.utc) - timedelta(seconds=10)
if start_time.tzinfo is None:
start_time = start_time.replace(tzinfo=timezone.utc)
if start_time > max_start_time:
start_time = max_start_time
self.params["start_time"] = start_time
return self

View File

@@ -51,10 +51,8 @@ class ResponseDataSerializer(BaseSerializer):
return serialized_item
@classmethod
def serialize_list(cls, data: List[Dict[str, Any]] | None) -> List[Dict[str, Any]]:
def serialize_list(cls, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Serializes a list of dictionary items"""
if not data:
return []
return [cls.serialize_dict(item) for item in data]

View File

@@ -408,7 +408,7 @@ class ListExpansionInputs(BlockSchemaInput):
class TweetTimeWindowInputs(BlockSchemaInput):
start_time: datetime | None = SchemaField(
description="Start time in YYYY-MM-DDTHH:mm:ssZ format. If set to a time less than 10 seconds ago, it will be automatically adjusted to 10 seconds ago (Twitter API requirement).",
description="Start time in YYYY-MM-DDTHH:mm:ssZ format",
placeholder="Enter start time",
default=None,
advanced=False,

View File

@@ -1,13 +1,9 @@
import logging
from typing import Literal
from urllib.parse import parse_qs, urlparse
from pydantic import SecretStr
from youtube_transcript_api._api import YouTubeTranscriptApi
from youtube_transcript_api._errors import NoTranscriptFound
from youtube_transcript_api._transcripts import FetchedTranscript
from youtube_transcript_api.formatters import TextFormatter
from youtube_transcript_api.proxies import WebshareProxyConfig
from backend.data.block import (
Block,
@@ -16,42 +12,7 @@ from backend.data.block import (
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import (
CredentialsField,
CredentialsMetaInput,
SchemaField,
UserPasswordCredentials,
)
from backend.integrations.providers import ProviderName
logger = logging.getLogger(__name__)
TEST_CREDENTIALS = UserPasswordCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="webshare_proxy",
username=SecretStr("mock-webshare-username"),
password=SecretStr("mock-webshare-password"),
title="Mock Webshare Proxy credentials",
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
WebshareProxyCredentials = UserPasswordCredentials
WebshareProxyCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.WEBSHARE_PROXY],
Literal["user_password"],
]
def WebshareProxyCredentialsField() -> WebshareProxyCredentialsInput:
return CredentialsField(
description="Webshare proxy credentials for fetching YouTube transcripts",
)
from backend.data.model import SchemaField
class TranscribeYoutubeVideoBlock(Block):
@@ -61,7 +22,6 @@ class TranscribeYoutubeVideoBlock(Block):
description="The URL of the YouTube video to transcribe",
placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
)
credentials: WebshareProxyCredentialsInput = WebshareProxyCredentialsField()
class Output(BlockSchemaOutput):
video_id: str = SchemaField(description="The extracted YouTube video ID")
@@ -75,12 +35,9 @@ class TranscribeYoutubeVideoBlock(Block):
id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c",
input_schema=TranscribeYoutubeVideoBlock.Input,
output_schema=TranscribeYoutubeVideoBlock.Output,
description="Transcribes a YouTube video using a proxy.",
description="Transcribes a YouTube video.",
categories={BlockCategory.SOCIAL},
test_input={
"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"},
test_output=[
("video_id", "dQw4w9WgXcQ"),
(
@@ -88,9 +45,8 @@ class TranscribeYoutubeVideoBlock(Block):
"Never gonna give you up\nNever gonna let you down",
),
],
test_credentials=TEST_CREDENTIALS,
test_mock={
"get_transcript": lambda video_id, credentials: [
"get_transcript": lambda video_id: [
{"text": "Never gonna give you up"},
{"text": "Never gonna let you down"},
],
@@ -113,27 +69,16 @@ class TranscribeYoutubeVideoBlock(Block):
return parsed_url.path.split("/")[2]
raise ValueError(f"Invalid YouTube URL: {url}")
def get_transcript(
self, video_id: str, credentials: WebshareProxyCredentials
) -> FetchedTranscript:
@staticmethod
def get_transcript(video_id: str) -> FetchedTranscript:
"""
Get transcript for a video, preferring English but falling back to any available language.
:param video_id: The YouTube video ID
:param credentials: The Webshare proxy credentials
:return: The fetched transcript
:raises: Any exception except NoTranscriptFound for requested languages
"""
logger.warning(
"Using Webshare proxy for YouTube transcript fetch (video_id=%s)",
video_id,
)
proxy_config = WebshareProxyConfig(
proxy_username=credentials.username.get_secret_value(),
proxy_password=credentials.password.get_secret_value(),
)
api = YouTubeTranscriptApi(proxy_config=proxy_config)
api = YouTubeTranscriptApi()
try:
# Try to get English transcript first (default behavior)
return api.fetch(video_id=video_id)
@@ -156,17 +101,11 @@ class TranscribeYoutubeVideoBlock(Block):
transcript_text = formatter.format_transcript(transcript)
return transcript_text
async def run(
self,
input_data: Input,
*,
credentials: WebshareProxyCredentials,
**kwargs,
) -> BlockOutput:
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
video_id = self.extract_video_id(input_data.youtube_url)
yield "video_id", video_id
transcript = self.get_transcript(video_id, credentials)
transcript = self.get_transcript(video_id)
transcript_text = self.format_transcript(transcript=transcript)
yield "transcript", transcript_text
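
The block's docstring promises an English-first fetch with a fallback to any available language, but the fallback branch is cut off in the hunk above. A sketch of what that flow could look like with the v1-style instance API the diff imports (.fetch and, by assumption, .list):

from youtube_transcript_api._api import YouTubeTranscriptApi
from youtube_transcript_api._errors import NoTranscriptFound

def fetch_any_language(video_id: str):
    # Try the default (English-first) fetch, then fall back to the first
    # transcript the API lists in any language. The .list call is assumed
    # from the library's v1 interface; the fallback body is not shown above.
    api = YouTubeTranscriptApi()
    try:
        return api.fetch(video_id=video_id)
    except NoTranscriptFound:
        transcript = next(iter(api.list(video_id)))
        return transcript.fetch()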

View File

@@ -5,8 +5,6 @@ from datetime import datetime
from faker import Faker
from prisma import Prisma
from backend.data.db import query_raw_with_schema
faker = Faker()
@@ -17,9 +15,9 @@ async def check_cron_job(db):
try:
# Check if pg_cron extension exists
extension_check = await query_raw_with_schema("CREATE EXTENSION pg_cron;")
extension_check = await db.query_raw("CREATE EXTENSION pg_cron;")
print(extension_check)
extension_check = await query_raw_with_schema(
extension_check = await db.query_raw(
"SELECT COUNT(*) as count FROM pg_extension WHERE extname = 'pg_cron'"
)
if extension_check[0]["count"] == 0:
@@ -27,7 +25,7 @@ async def check_cron_job(db):
return False
# Check if the refresh job exists
job_check = await query_raw_with_schema(
job_check = await db.query_raw(
"""
SELECT jobname, schedule, command
FROM cron.job
@@ -57,33 +55,33 @@ async def get_materialized_view_counts(db):
print("-" * 40)
# Get counts from mv_agent_run_counts
agent_runs = await query_raw_with_schema(
agent_runs = await db.query_raw(
"""
SELECT COUNT(*) as total_agents,
SUM(run_count) as total_runs,
MAX(run_count) as max_runs,
MIN(run_count) as min_runs
FROM {schema_prefix}mv_agent_run_counts
FROM mv_agent_run_counts
"""
)
# Get counts from mv_review_stats
review_stats = await query_raw_with_schema(
review_stats = await db.query_raw(
"""
SELECT COUNT(*) as total_listings,
SUM(review_count) as total_reviews,
AVG(avg_rating) as overall_avg_rating
FROM {schema_prefix}mv_review_stats
FROM mv_review_stats
"""
)
# Get sample data from StoreAgent view
store_agents = await query_raw_with_schema(
store_agents = await db.query_raw(
"""
SELECT COUNT(*) as total_store_agents,
AVG(runs) as avg_runs,
AVG(rating) as avg_rating
FROM {schema_prefix}"StoreAgent"
FROM "StoreAgent"
"""
)

View File

@@ -5,8 +5,6 @@ import asyncio
from prisma import Prisma
from backend.data.db import query_raw_with_schema
async def check_store_data(db):
"""Check what store data exists in the database."""
@@ -91,11 +89,11 @@ async def check_store_data(db):
sa.creator_username,
sa.categories,
sa.updated_at
FROM {schema_prefix}"StoreAgent" sa
FROM "StoreAgent" sa
LIMIT 10;
"""
store_agents = await query_raw_with_schema(query)
store_agents = await db.query_raw(query)
print(f"Total store agents in view: {len(store_agents)}")
if store_agents:
@@ -113,22 +111,22 @@ async def check_store_data(db):
# Check for any APPROVED store listing versions
query = """
SELECT COUNT(*) as count
FROM {schema_prefix}"StoreListingVersion"
FROM "StoreListingVersion"
WHERE "submissionStatus" = 'APPROVED'
"""
result = await query_raw_with_schema(query)
result = await db.query_raw(query)
approved_count = result[0]["count"] if result else 0
print(f"Approved store listing versions: {approved_count}")
# Check for store listings with hasApprovedVersion = true
query = """
SELECT COUNT(*) as count
FROM {schema_prefix}"StoreListing"
FROM "StoreListing"
WHERE "hasApprovedVersion" = true AND "isDeleted" = false
"""
result = await query_raw_with_schema(query)
result = await db.query_raw(query)
has_approved_count = result[0]["count"] if result else 0
print(f"Store listings with approved versions: {has_approved_count}")
@@ -136,10 +134,10 @@ async def check_store_data(db):
query = """
SELECT COUNT(DISTINCT "agentGraphId") as unique_agents,
COUNT(*) as total_executions
FROM {schema_prefix}"AgentGraphExecution"
FROM "AgentGraphExecution"
"""
result = await query_raw_with_schema(query)
result = await db.query_raw(query)
if result:
print("\nAgent Graph Executions:")
print(f" Unique agents with executions: {result[0]['unique_agents']}")

View File

@@ -71,7 +71,6 @@ class BlockType(Enum):
AGENT = "Agent"
AI = "AI"
AYRSHARE = "Ayrshare"
HUMAN_IN_THE_LOOP = "Human In The Loop"
class BlockCategory(Enum):
@@ -266,61 +265,14 @@ class BlockSchema(BaseModel):
)
}
@classmethod
def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
"""
Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).
Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}
Raises:
ValueError: If multiple fields have the same kwarg_name, as this would
cause silent overwriting and only the last field would be processed.
"""
result: dict[str, dict[str, Any]] = {}
schema = cls.jsonschema()
properties = schema.get("properties", {})
for field_name, field_schema in properties.items():
auto_creds = field_schema.get("auto_credentials")
if auto_creds:
kwarg_name = auto_creds.get("kwarg_name", "credentials")
if kwarg_name in result:
raise ValueError(
f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
f"in fields '{result[kwarg_name]['field_name']}' and "
f"'{field_name}' on {cls.__qualname__}"
)
result[kwarg_name] = {
"field_name": field_name,
"config": auto_creds,
}
return result
@classmethod
def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
result = {}
# Regular credentials fields
for field_name in cls.get_credentials_fields().keys():
result[field_name] = CredentialsFieldInfo.model_validate(
return {
field_name: CredentialsFieldInfo.model_validate(
cls.get_field_schema(field_name), by_alias=True
)
# Auto-generated credentials fields (from GoogleDriveFileInput etc.)
for kwarg_name, info in cls.get_auto_credentials_fields().items():
config = info["config"]
# Build a schema-like dict that CredentialsFieldInfo can parse
auto_schema = {
"credentials_provider": [config.get("provider", "google")],
"credentials_types": [config.get("type", "oauth2")],
"credentials_scopes": config.get("scopes"),
}
result[kwarg_name] = CredentialsFieldInfo.model_validate(
auto_schema, by_alias=True
)
return result
for field_name in cls.get_credentials_fields().keys()
}
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
@@ -844,12 +796,3 @@ def get_io_block_ids() -> Sequence[str]:
for id, B in get_blocks().items()
if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
]
@cached(ttl_seconds=3600)
def get_human_in_the_loop_block_ids() -> Sequence[str]:
return [
id
for id, B in get_blocks().items()
if B().block_type == BlockType.HUMAN_IN_THE_LOOP
]

View File

@@ -7,7 +7,7 @@ from prisma.models import CreditTransaction, UserBalance
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
from backend.data.execution import ExecutionContext, NodeExecutionEntry
from backend.data.execution import NodeExecutionEntry, UserContext
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import block_usage_cost
from backend.integrations.credentials_store import openai_credentials
@@ -73,7 +73,6 @@ async def test_block_credit_usage(server: SpinTestServer):
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
graph_version=1,
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
@@ -86,7 +85,7 @@ async def test_block_credit_usage(server: SpinTestServer):
"type": openai_credentials.type,
},
},
execution_context=ExecutionContext(user_timezone="UTC"),
user_context=UserContext(timezone="UTC"),
),
)
assert spending_amount_1 > 0
@@ -95,13 +94,12 @@ async def test_block_credit_usage(server: SpinTestServer):
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
graph_version=1,
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
execution_context=ExecutionContext(user_timezone="UTC"),
user_context=UserContext(timezone="UTC"),
),
)
assert spending_amount_2 == 0

View File

@@ -34,7 +34,6 @@ from prisma.types import (
AgentNodeExecutionKeyValueDataCreateInput,
AgentNodeExecutionUpdateInput,
AgentNodeExecutionWhereInput,
AgentNodeExecutionWhereUniqueInput,
)
from pydantic import BaseModel, ConfigDict, JsonValue, ValidationError
from pydantic.fields import Field
@@ -71,18 +70,6 @@ logger = logging.getLogger(__name__)
config = Config()
class ExecutionContext(BaseModel):
"""
Unified context that carries execution-level data throughout the entire execution flow.
This includes information needed by blocks, sub-graphs, and execution management.
"""
safe_mode: bool = True
user_timezone: str = "UTC"
root_execution_id: Optional[str] = None
parent_execution_id: Optional[str] = None
# -------------------------- Models -------------------------- #
@@ -109,14 +96,11 @@ NodesInputMasks = Mapping[str, NodeInputMask]
VALID_STATUS_TRANSITIONS = {
ExecutionStatus.QUEUED: [
ExecutionStatus.INCOMPLETE,
ExecutionStatus.TERMINATED, # For resuming halted execution
ExecutionStatus.REVIEW, # For resuming after review
],
ExecutionStatus.RUNNING: [
ExecutionStatus.INCOMPLETE,
ExecutionStatus.QUEUED,
ExecutionStatus.TERMINATED, # For resuming halted execution
ExecutionStatus.REVIEW, # For resuming after review
],
ExecutionStatus.COMPLETED: [
ExecutionStatus.RUNNING,
@@ -125,16 +109,11 @@ VALID_STATUS_TRANSITIONS = {
ExecutionStatus.INCOMPLETE,
ExecutionStatus.QUEUED,
ExecutionStatus.RUNNING,
ExecutionStatus.REVIEW,
],
ExecutionStatus.TERMINATED: [
ExecutionStatus.INCOMPLETE,
ExecutionStatus.QUEUED,
ExecutionStatus.RUNNING,
ExecutionStatus.REVIEW,
],
ExecutionStatus.REVIEW: [
ExecutionStatus.RUNNING,
],
}
@@ -377,8 +356,9 @@ class GraphExecutionWithNodes(GraphExecution):
def to_graph_execution_entry(
self,
execution_context: ExecutionContext,
user_context: "UserContext",
compiled_nodes_input_masks: Optional[NodesInputMasks] = None,
parent_graph_exec_id: Optional[str] = None,
):
return GraphExecutionEntry(
user_id=self.user_id,
@@ -386,7 +366,8 @@ class GraphExecutionWithNodes(GraphExecution):
graph_version=self.graph_version or 0,
graph_exec_id=self.id,
nodes_input_masks=compiled_nodes_input_masks,
execution_context=execution_context,
user_context=user_context,
parent_graph_exec_id=parent_graph_exec_id,
)
@@ -459,18 +440,17 @@ class NodeExecutionResult(BaseModel):
)
def to_node_execution_entry(
self, execution_context: ExecutionContext
self, user_context: "UserContext"
) -> "NodeExecutionEntry":
return NodeExecutionEntry(
user_id=self.user_id,
graph_exec_id=self.graph_exec_id,
graph_id=self.graph_id,
graph_version=self.graph_version,
node_exec_id=self.node_exec_id,
node_id=self.node_id,
block_id=self.block_id,
inputs=self.input_data,
execution_context=execution_context,
user_context=user_context,
)
@@ -748,7 +728,7 @@ async def upsert_execution_input(
input_name: str,
input_data: JsonValue,
node_exec_id: str | None = None,
) -> tuple[NodeExecutionResult, BlockInput]:
) -> tuple[str, BlockInput]:
"""
Insert an AgentNodeExecutionInputOutput record as one of AgentNodeExecution.Input.
If no existing AgentNodeExecution is still missing `input_name` as an input, create a new one.
@@ -781,7 +761,7 @@ async def upsert_execution_input(
existing_execution = await AgentNodeExecution.prisma().find_first(
where=existing_exec_query_filter,
order={"addedTime": "asc"},
include={"Input": True, "GraphExecution": True},
include={"Input": True},
)
json_input_data = SafeJson(input_data)
@@ -793,7 +773,7 @@ async def upsert_execution_input(
referencedByInputExecId=existing_execution.id,
)
)
return NodeExecutionResult.from_db(existing_execution), {
return existing_execution.id, {
**{
input_data.name: type_utils.convert(input_data.data, JsonValue)
for input_data in existing_execution.Input or []
@@ -808,10 +788,9 @@ async def upsert_execution_input(
agentGraphExecutionId=graph_exec_id,
executionStatus=ExecutionStatus.INCOMPLETE,
Input={"create": {"name": input_name, "data": json_input_data}},
),
include={"GraphExecution": True},
)
)
return NodeExecutionResult.from_db(result), {input_name: input_data}
return result.id, {input_name: input_data}
else:
raise ValueError(
@@ -907,25 +886,9 @@ async def update_node_execution_status_batch(
node_exec_ids: list[str],
status: ExecutionStatus,
stats: dict[str, Any] | None = None,
) -> int:
# Validate status transitions - allowed_from should never be empty for valid statuses
allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
if not allowed_from:
raise ValueError(
f"Invalid status transition: {status} has no valid source statuses"
)
# For batch updates, we filter to only update nodes with valid current statuses
where_clause = cast(
AgentNodeExecutionWhereInput,
{
"id": {"in": node_exec_ids},
"executionStatus": {"in": [s.value for s in allowed_from]},
},
)
return await AgentNodeExecution.prisma().update_many(
where=where_clause,
):
await AgentNodeExecution.prisma().update_many(
where={"id": {"in": node_exec_ids}},
data=_get_update_status_data(status, None, stats),
)
@@ -939,32 +902,15 @@ async def update_node_execution_status(
if status == ExecutionStatus.QUEUED and execution_data is None:
raise ValueError("Execution data must be provided when queuing an execution.")
# Validate status transitions - allowed_from should never be empty for valid statuses
allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
if not allowed_from:
raise ValueError(
f"Invalid status transition: {status} has no valid source statuses"
)
if res := await AgentNodeExecution.prisma().update(
where=cast(
AgentNodeExecutionWhereUniqueInput,
{
"id": node_exec_id,
"executionStatus": {"in": [s.value for s in allowed_from]},
},
),
res = await AgentNodeExecution.prisma().update(
where={"id": node_exec_id},
data=_get_update_status_data(status, execution_data, stats),
include=EXECUTION_RESULT_INCLUDE,
):
return NodeExecutionResult.from_db(res)
)
if not res:
raise ValueError(f"Execution {node_exec_id} not found.")
if res := await AgentNodeExecution.prisma().find_unique(
where={"id": node_exec_id}, include=EXECUTION_RESULT_INCLUDE
):
return NodeExecutionResult.from_db(res)
raise ValueError(f"Execution {node_exec_id} not found.")
return NodeExecutionResult.from_db(res)
def _get_update_status_data(
@@ -1018,17 +964,17 @@ async def get_node_execution(node_exec_id: str) -> NodeExecutionResult | None:
return NodeExecutionResult.from_db(execution)
def _build_node_execution_where_clause(
async def get_node_executions(
graph_exec_id: str | None = None,
node_id: str | None = None,
block_ids: list[str] | None = None,
statuses: list[ExecutionStatus] | None = None,
limit: int | None = None,
created_time_gte: datetime | None = None,
created_time_lte: datetime | None = None,
) -> AgentNodeExecutionWhereInput:
"""
Build where clause for node execution queries.
"""
include_exec_data: bool = True,
) -> list[NodeExecutionResult]:
"""⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
where_clause: AgentNodeExecutionWhereInput = {}
if graph_exec_id:
where_clause["agentGraphExecutionId"] = graph_exec_id
@@ -1045,29 +991,6 @@ def _build_node_execution_where_clause(
"lte": created_time_lte or datetime.max.replace(tzinfo=timezone.utc),
}
return where_clause
async def get_node_executions(
graph_exec_id: str | None = None,
node_id: str | None = None,
block_ids: list[str] | None = None,
statuses: list[ExecutionStatus] | None = None,
limit: int | None = None,
created_time_gte: datetime | None = None,
created_time_lte: datetime | None = None,
include_exec_data: bool = True,
) -> list[NodeExecutionResult]:
"""⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
where_clause = _build_node_execution_where_clause(
graph_exec_id=graph_exec_id,
node_id=node_id,
block_ids=block_ids,
statuses=statuses,
created_time_gte=created_time_gte,
created_time_lte=created_time_lte,
)
executions = await AgentNodeExecution.prisma().find_many(
where=where_clause,
include=(
@@ -1109,29 +1032,31 @@ async def get_latest_node_execution(
# ----------------- Execution Infrastructure ----------------- #
class GraphExecutionEntry(BaseModel):
model_config = {"extra": "ignore"}
class UserContext(BaseModel):
"""Generic user context for graph execution containing user-specific settings."""
timezone: str
class GraphExecutionEntry(BaseModel):
user_id: str
graph_exec_id: str
graph_id: str
graph_version: int
nodes_input_masks: Optional[NodesInputMasks] = None
execution_context: ExecutionContext = Field(default_factory=ExecutionContext)
user_context: UserContext
parent_graph_exec_id: Optional[str] = None
class NodeExecutionEntry(BaseModel):
model_config = {"extra": "ignore"}
user_id: str
graph_exec_id: str
graph_id: str
graph_version: int
node_exec_id: str
node_id: str
block_id: str
inputs: BlockInput
execution_context: ExecutionContext = Field(default_factory=ExecutionContext)
user_context: UserContext
class ExecutionQueue(Generic[T]):
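For illustration, a minimal sketch of constructing the refactored entry models above, with hypothetical IDs; UserContext now carries only the user's timezone, and the parent run is tracked directly on the entry:
# Hypothetical values, inside an async executor context.
user_ctx = UserContext(timezone="America/New_York")
entry = GraphExecutionEntry(
    user_id="user-123",
    graph_exec_id="exec-abc",
    graph_id="graph-xyz",
    graph_version=1,
    user_context=user_ctx,
    parent_graph_exec_id=None,  # set when this run is spawned by another execution
)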

View File

@@ -61,10 +61,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
class GraphSettings(BaseModel):
human_in_the_loop_safe_mode: bool | None = None
class Link(BaseDbModel):
source_id: str
sink_id: str
@@ -229,15 +225,6 @@ class BaseGraph(BaseDbModel):
def has_external_trigger(self) -> bool:
return self.webhook_input_node is not None
@computed_field
@property
def has_human_in_the_loop(self) -> bool:
return any(
node.block_id
for node in self.nodes
if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
)
@property
def webhook_input_node(self) -> Node | None:
return next(
@@ -1118,28 +1105,6 @@ async def delete_graph(graph_id: str, user_id: str) -> int:
return entries_count
async def get_graph_settings(user_id: str, graph_id: str) -> GraphSettings:
lib = await LibraryAgent.prisma().find_first(
where={
"userId": user_id,
"agentGraphId": graph_id,
"isDeleted": False,
"isArchived": False,
},
order={"agentGraphVersion": "desc"},
)
if not lib or not lib.settings:
return GraphSettings()
try:
return GraphSettings.model_validate(lib.settings)
except Exception:
logger.warning(
f"Malformed settings for LibraryAgent user={user_id} graph={graph_id}"
)
return GraphSettings()
async def validate_graph_execution_permissions(
user_id: str, graph_id: str, graph_version: int, is_sub_graph: bool = False
) -> None:

View File

@@ -1,258 +0,0 @@
"""
Data layer for Human In The Loop (HITL) review operations.
Handles all database operations for pending human reviews.
"""
import asyncio
import logging
from datetime import datetime, timezone
from typing import Optional
from prisma.enums import ReviewStatus
from prisma.models import PendingHumanReview
from prisma.types import PendingHumanReviewUpdateInput
from pydantic import BaseModel
from backend.server.v2.executions.review.model import (
PendingHumanReviewModel,
SafeJsonData,
)
from backend.util.json import SafeJson
logger = logging.getLogger(__name__)
class ReviewResult(BaseModel):
"""Result of a review operation."""
data: Optional[SafeJsonData] = None
status: ReviewStatus
message: str = ""
processed: bool
node_exec_id: str
async def get_or_create_human_review(
user_id: str,
node_exec_id: str,
graph_exec_id: str,
graph_id: str,
graph_version: int,
input_data: SafeJsonData,
message: str,
editable: bool,
) -> Optional[ReviewResult]:
"""
Get existing review or create a new pending review entry.
Uses upsert with an empty update to fetch the existing review or create a new one in a single operation.
Args:
user_id: ID of the user who owns this review
node_exec_id: ID of the node execution
graph_exec_id: ID of the graph execution
graph_id: ID of the graph template
graph_version: Version of the graph template
input_data: The data to be reviewed
message: Instructions for the reviewer
editable: Whether the data can be edited
Returns:
ReviewResult if the review is complete, None if waiting for human input
"""
try:
logger.debug(f"Getting or creating review for node {node_exec_id}")
# Upsert - get existing or create new review
review = await PendingHumanReview.prisma().upsert(
where={"nodeExecId": node_exec_id},
data={
"create": {
"userId": user_id,
"nodeExecId": node_exec_id,
"graphExecId": graph_exec_id,
"graphId": graph_id,
"graphVersion": graph_version,
"payload": SafeJson(input_data),
"instructions": message,
"editable": editable,
"status": ReviewStatus.WAITING,
},
"update": {}, # Do nothing on update - keep existing review as is
},
)
logger.info(
f"Review {'created' if review.createdAt == review.updatedAt else 'retrieved'} for node {node_exec_id} with status {review.status}"
)
except Exception as e:
logger.error(
f"Database error in get_or_create_human_review for node {node_exec_id}: {str(e)}"
)
raise
# Early return if already processed
if review.processed:
return None
# If pending, return None to continue waiting, otherwise return the review result
if review.status == ReviewStatus.WAITING:
return None
else:
return ReviewResult(
data=review.payload if review.status == ReviewStatus.APPROVED else None,
status=review.status,
message=review.reviewMessage or "",
processed=review.processed,
node_exec_id=review.nodeExecId,
)
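A sketch of how a caller (e.g. a Human-In-The-Loop block) would consume this contract, with hypothetical IDs: None means the node should keep waiting, a ReviewResult means the human has decided:
# Inside an async block execution; IDs are hypothetical.
result = await get_or_create_human_review(
    user_id="user-123",
    node_exec_id="node-exec-1",
    graph_exec_id="graph-exec-1",
    graph_id="graph-1",
    graph_version=1,
    input_data={"draft": "Hello"},
    message="Approve this draft before it is sent",
    editable=True,
)
if result is None:
    ...  # still WAITING (or already processed): pause this node
elif result.status == ReviewStatus.APPROVED:
    ...  # continue with result.data (possibly edited by the reviewer)
else:
    ...  # REJECTED: stop and surface result.message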
async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
"""
Check if a graph execution has any pending reviews.
Args:
graph_exec_id: The graph execution ID to check
Returns:
True if there are reviews waiting for human input, False otherwise
"""
# Check if there are any reviews waiting for human input
count = await PendingHumanReview.prisma().count(
where={"graphExecId": graph_exec_id, "status": ReviewStatus.WAITING}
)
return count > 0
async def get_pending_reviews_for_user(
user_id: str, page: int = 1, page_size: int = 25
) -> list["PendingHumanReviewModel"]:
"""
Get all pending reviews for a user with pagination.
Args:
user_id: User ID to get reviews for
page: Page number (1-indexed)
page_size: Number of reviews per page
Returns:
List of pending review models
"""
# Calculate offset for pagination
offset = (page - 1) * page_size
reviews = await PendingHumanReview.prisma().find_many(
where={"userId": user_id, "status": ReviewStatus.WAITING},
order={"createdAt": "desc"},
skip=offset,
take=page_size,
)
return [PendingHumanReviewModel.from_db(review) for review in reviews]
async def get_pending_reviews_for_execution(
graph_exec_id: str, user_id: str
) -> list["PendingHumanReviewModel"]:
"""
Get all pending reviews for a specific graph execution.
Args:
graph_exec_id: Graph execution ID
user_id: User ID for security validation
Returns:
List of pending review models
"""
reviews = await PendingHumanReview.prisma().find_many(
where={
"userId": user_id,
"graphExecId": graph_exec_id,
"status": ReviewStatus.WAITING,
},
order={"createdAt": "asc"},
)
return [PendingHumanReviewModel.from_db(review) for review in reviews]
async def process_all_reviews_for_execution(
user_id: str,
review_decisions: dict[str, tuple[ReviewStatus, SafeJsonData | None, str | None]],
) -> dict[str, PendingHumanReviewModel]:
"""Process all pending reviews for an execution with approve/reject decisions.
Args:
user_id: User ID for ownership validation
review_decisions: Map of node_exec_id -> (status, reviewed_data, message)
Returns:
Dict of node_exec_id -> updated review model
"""
if not review_decisions:
return {}
node_exec_ids = list(review_decisions.keys())
# Get all reviews for validation
reviews = await PendingHumanReview.prisma().find_many(
where={
"nodeExecId": {"in": node_exec_ids},
"userId": user_id,
"status": ReviewStatus.WAITING,
},
)
# Validate all reviews can be processed
if len(reviews) != len(node_exec_ids):
missing_ids = set(node_exec_ids) - {review.nodeExecId for review in reviews}
raise ValueError(
f"Reviews not found, access denied, or not in WAITING status: {', '.join(missing_ids)}"
)
# Create parallel update tasks
update_tasks = []
for review in reviews:
new_status, reviewed_data, message = review_decisions[review.nodeExecId]
has_data_changes = reviewed_data is not None and reviewed_data != review.payload
# Check edit permissions for actual data modifications
if has_data_changes and not review.editable:
raise ValueError(f"Review {review.nodeExecId} is not editable")
update_data: PendingHumanReviewUpdateInput = {
"status": new_status,
"reviewMessage": message,
"wasEdited": has_data_changes,
"reviewedAt": datetime.now(timezone.utc),
}
if has_data_changes:
update_data["payload"] = SafeJson(reviewed_data)
task = PendingHumanReview.prisma().update(
where={"nodeExecId": review.nodeExecId},
data=update_data,
)
update_tasks.append(task)
# Execute all updates in parallel and get updated reviews
updated_reviews = await asyncio.gather(*update_tasks)
# Note: Execution resumption is now handled at the API layer after ALL reviews
# for an execution are processed (both approved and rejected)
# Return as dict for easy access
return {
review.nodeExecId: PendingHumanReviewModel.from_db(review)
for review in updated_reviews
}
async def update_review_processed_status(node_exec_id: str, processed: bool) -> None:
"""Update the processed status of a review."""
await PendingHumanReview.prisma().update(
where={"nodeExecId": node_exec_id}, data={"processed": processed}
)
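For reference, a sketch of the decision map process_all_reviews_for_execution expects, one entry per node execution; the IDs and payloads are hypothetical:
# Inside an async API handler; node_exec_id -> (status, edited_data_or_None, message).
updated = await process_all_reviews_for_execution(
    user_id="test-user-123",
    review_decisions={
        "node-exec-1": (ReviewStatus.APPROVED, {"draft": "Hello, world"}, "Edited and approved"),
        "node-exec-2": (ReviewStatus.REJECTED, None, "Off-topic"),
    },
)
assert set(updated) == {"node-exec-1", "node-exec-2"}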

View File

@@ -1,342 +0,0 @@
import datetime
from unittest.mock import AsyncMock, Mock
import pytest
import pytest_mock
from prisma.enums import ReviewStatus
from backend.data.human_review import (
get_or_create_human_review,
get_pending_reviews_for_execution,
get_pending_reviews_for_user,
has_pending_reviews_for_graph_exec,
process_all_reviews_for_execution,
)
@pytest.fixture
def sample_db_review():
"""Create a sample database review object"""
mock_review = Mock()
mock_review.nodeExecId = "test_node_123"
mock_review.userId = "test-user-123"
mock_review.graphExecId = "test_graph_exec_456"
mock_review.graphId = "test_graph_789"
mock_review.graphVersion = 1
mock_review.payload = {"data": "test payload"}
mock_review.instructions = "Please review"
mock_review.editable = True
mock_review.status = ReviewStatus.WAITING
mock_review.reviewMessage = None
mock_review.wasEdited = False
mock_review.processed = False
mock_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
mock_review.updatedAt = None
mock_review.reviewedAt = None
return mock_review
@pytest.mark.asyncio
async def test_get_or_create_human_review_new(
mocker: pytest_mock.MockFixture,
sample_db_review,
):
"""Test creating a new human review"""
# Mock the upsert to return a new review (created_at == updated_at)
sample_db_review.status = ReviewStatus.WAITING
sample_db_review.processed = False
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
result = await get_or_create_human_review(
user_id="test-user-123",
node_exec_id="test_node_123",
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
input_data={"data": "test payload"},
message="Please review",
editable=True,
)
# Should return None for pending reviews (waiting for human input)
assert result is None
@pytest.mark.asyncio
async def test_get_or_create_human_review_approved(
mocker: pytest_mock.MockFixture,
sample_db_review,
):
"""Test retrieving an already approved review"""
# Set up review as already approved
sample_db_review.status = ReviewStatus.APPROVED
sample_db_review.processed = False
sample_db_review.reviewMessage = "Looks good"
mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
result = await get_or_create_human_review(
user_id="test-user-123",
node_exec_id="test_node_123",
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
input_data={"data": "test payload"},
message="Please review",
editable=True,
)
# Should return the approved result
assert result is not None
assert result.status == ReviewStatus.APPROVED
assert result.data == {"data": "test payload"}
assert result.message == "Looks good"
@pytest.mark.asyncio
async def test_has_pending_reviews_for_graph_exec_true(
mocker: pytest_mock.MockFixture,
):
"""Test when there are pending reviews"""
mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_count.return_value.count = AsyncMock(return_value=2)
result = await has_pending_reviews_for_graph_exec("test_graph_exec")
assert result is True
@pytest.mark.asyncio
async def test_has_pending_reviews_for_graph_exec_false(
mocker: pytest_mock.MockFixture,
):
"""Test when there are no pending reviews"""
mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_count.return_value.count = AsyncMock(return_value=0)
result = await has_pending_reviews_for_graph_exec("test_graph_exec")
assert result is False
@pytest.mark.asyncio
async def test_get_pending_reviews_for_user(
mocker: pytest_mock.MockFixture,
sample_db_review,
):
"""Test getting pending reviews for a user with pagination"""
mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])
result = await get_pending_reviews_for_user("test_user", page=2, page_size=10)
assert len(result) == 1
assert result[0].node_exec_id == "test_node_123"
# Verify pagination parameters
call_args = mock_find_many.return_value.find_many.call_args
assert call_args.kwargs["skip"] == 10 # (page-1) * page_size = (2-1) * 10
assert call_args.kwargs["take"] == 10
@pytest.mark.asyncio
async def test_get_pending_reviews_for_execution(
mocker: pytest_mock.MockFixture,
sample_db_review,
):
"""Test getting pending reviews for specific execution"""
mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])
result = await get_pending_reviews_for_execution(
"test_graph_exec_456", "test-user-123"
)
assert len(result) == 1
assert result[0].graph_exec_id == "test_graph_exec_456"
# Verify it filters by execution and user
call_args = mock_find_many.return_value.find_many.call_args
where_clause = call_args.kwargs["where"]
assert where_clause["userId"] == "test-user-123"
assert where_clause["graphExecId"] == "test_graph_exec_456"
assert where_clause["status"] == ReviewStatus.WAITING
@pytest.mark.asyncio
async def test_process_all_reviews_for_execution_success(
mocker: pytest_mock.MockFixture,
sample_db_review,
):
"""Test successful processing of reviews for an execution"""
# Mock finding reviews
mock_prisma = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_prisma.return_value.find_many = AsyncMock(return_value=[sample_db_review])
# Mock updating reviews
updated_review = Mock()
updated_review.nodeExecId = "test_node_123"
updated_review.userId = "test-user-123"
updated_review.graphExecId = "test_graph_exec_456"
updated_review.graphId = "test_graph_789"
updated_review.graphVersion = 1
updated_review.payload = {"data": "modified"}
updated_review.instructions = "Please review"
updated_review.editable = True
updated_review.status = ReviewStatus.APPROVED
updated_review.reviewMessage = "Approved"
updated_review.wasEdited = True
updated_review.processed = False
updated_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
updated_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
updated_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
mock_prisma.return_value.update = AsyncMock(return_value=updated_review)
# Mock gather to simulate parallel updates
mocker.patch(
"backend.data.human_review.asyncio.gather",
new=AsyncMock(return_value=[updated_review]),
)
result = await process_all_reviews_for_execution(
user_id="test-user-123",
review_decisions={
"test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved")
},
)
assert len(result) == 1
assert "test_node_123" in result
assert result["test_node_123"].status == ReviewStatus.APPROVED
@pytest.mark.asyncio
async def test_process_all_reviews_for_execution_validation_errors(
mocker: pytest_mock.MockFixture,
):
"""Test validation errors in process_all_reviews_for_execution"""
# Mock finding fewer reviews than requested (some not found)
mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_find_many.return_value.find_many = AsyncMock(
return_value=[]
) # No reviews found
with pytest.raises(ValueError, match="Reviews not found"):
await process_all_reviews_for_execution(
user_id="test-user-123",
review_decisions={
"nonexistent_node": (ReviewStatus.APPROVED, {"data": "test"}, "message")
},
)
@pytest.mark.asyncio
async def test_process_all_reviews_edit_permission_error(
mocker: pytest_mock.MockFixture,
sample_db_review,
):
"""Test editing non-editable review"""
# Set review as non-editable
sample_db_review.editable = False
# Mock finding reviews
mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])
with pytest.raises(ValueError, match="not editable"):
await process_all_reviews_for_execution(
user_id="test-user-123",
review_decisions={
"test_node_123": (
ReviewStatus.APPROVED,
{"data": "modified"},
"message",
)
},
)
@pytest.mark.asyncio
async def test_process_all_reviews_mixed_approval_rejection(
mocker: pytest_mock.MockFixture,
sample_db_review,
):
"""Test processing mixed approval and rejection decisions"""
# Create second review for rejection
second_review = Mock()
second_review.nodeExecId = "test_node_456"
second_review.userId = "test-user-123"
second_review.graphExecId = "test_graph_exec_456"
second_review.graphId = "test_graph_789"
second_review.graphVersion = 1
second_review.payload = {"data": "original"}
second_review.instructions = "Second review"
second_review.editable = True
second_review.status = ReviewStatus.WAITING
second_review.reviewMessage = None
second_review.wasEdited = False
second_review.processed = False
second_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
second_review.updatedAt = None
second_review.reviewedAt = None
# Mock finding reviews
mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
mock_find_many.return_value.find_many = AsyncMock(
return_value=[sample_db_review, second_review]
)
# Mock updating reviews
approved_review = Mock()
approved_review.nodeExecId = "test_node_123"
approved_review.userId = "test-user-123"
approved_review.graphExecId = "test_graph_exec_456"
approved_review.graphId = "test_graph_789"
approved_review.graphVersion = 1
approved_review.payload = {"data": "modified"}
approved_review.instructions = "Please review"
approved_review.editable = True
approved_review.status = ReviewStatus.APPROVED
approved_review.reviewMessage = "Approved"
approved_review.wasEdited = True
approved_review.processed = False
approved_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
approved_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
approved_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
rejected_review = Mock()
rejected_review.nodeExecId = "test_node_456"
rejected_review.userId = "test-user-123"
rejected_review.graphExecId = "test_graph_exec_456"
rejected_review.graphId = "test_graph_789"
rejected_review.graphVersion = 1
rejected_review.payload = {"data": "original"}
rejected_review.instructions = "Please review"
rejected_review.editable = True
rejected_review.status = ReviewStatus.REJECTED
rejected_review.reviewMessage = "Rejected"
rejected_review.wasEdited = False
rejected_review.processed = False
rejected_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
rejected_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
rejected_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
mocker.patch(
"backend.data.human_review.asyncio.gather",
new=AsyncMock(return_value=[approved_review, rejected_review]),
)
result = await process_all_reviews_for_execution(
user_id="test-user-123",
review_decisions={
"test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved"),
"test_node_456": (ReviewStatus.REJECTED, None, "Rejected"),
},
)
assert len(result) == 2
assert "test_node_123" in result
assert "test_node_456" in result

View File

@@ -1,5 +1,5 @@
import logging
from typing import TYPE_CHECKING, AsyncGenerator, Literal, Optional, overload
from typing import AsyncGenerator, Literal, Optional, overload
from prisma.models import AgentNode, AgentPreset, IntegrationWebhook
from prisma.types import (
@@ -19,12 +19,10 @@ from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks import get_webhook_manager
from backend.integrations.webhooks.utils import webhook_ingress_url
from backend.server.v2.library.model import LibraryAgentPreset
from backend.util.exceptions import NotFoundError
from backend.util.json import SafeJson
if TYPE_CHECKING:
from backend.server.v2.library.model import LibraryAgentPreset
from .db import BaseDbModel
from .graph import NodeModel
@@ -66,7 +64,7 @@ class Webhook(BaseDbModel):
class WebhookWithRelations(Webhook):
triggered_nodes: list[NodeModel]
triggered_presets: list["LibraryAgentPreset"]
triggered_presets: list[LibraryAgentPreset]
@staticmethod
def from_db(webhook: IntegrationWebhook):
@@ -75,12 +73,6 @@ class WebhookWithRelations(Webhook):
"AgentNodes and AgentPresets must be included in "
"IntegrationWebhook query with relations"
)
# LibraryAgentPreset import is moved to TYPE_CHECKING to avoid circular import:
# integrations.py → library/model.py → integrations.py (for Webhook)
# Runtime import is used in WebhookWithRelations.from_db() method instead
# Import at runtime to avoid circular dependency
from backend.server.v2.library.model import LibraryAgentPreset
return WebhookWithRelations(
**Webhook.from_db(webhook).model_dump(),
triggered_nodes=[NodeModel.from_db(node) for node in webhook.AgentNodes],
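The comments removed above describe the standard pattern for breaking an import cycle: a type-only import under TYPE_CHECKING plus a deferred runtime import. A generic sketch of that pattern (presets_from_db is a hypothetical helper):
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by type checkers, so it cannot create a runtime cycle.
    from backend.server.v2.library.model import LibraryAgentPreset

def presets_from_db(raw_presets) -> list["LibraryAgentPreset"]:
    # Deferred runtime import: both modules are fully initialized by now.
    from backend.server.v2.library.model import LibraryAgentPreset

    return [LibraryAgentPreset.from_db(p) for p in raw_presets]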

View File

@@ -46,7 +46,6 @@ from backend.util.settings import Secrets
# Type alias for any provider name (including custom ones)
AnyProviderName = str # Will be validated as ProviderName at runtime
USER_TIMEZONE_NOT_SET = "not-set"
class User(BaseModel):
@@ -99,7 +98,7 @@ class User(BaseModel):
# User timezone for scheduling and time display
timezone: str = Field(
default=USER_TIMEZONE_NOT_SET,
default="not-set",
description="User timezone (IANA timezone identifier or 'not-set')",
)
@@ -156,7 +155,7 @@ class User(BaseModel):
notify_on_daily_summary=prisma_user.notifyOnDailySummary or True,
notify_on_weekly_summary=prisma_user.notifyOnWeeklySummary or True,
notify_on_monthly_summary=prisma_user.notifyOnMonthlySummary or True,
timezone=prisma_user.timezone or USER_TIMEZONE_NOT_SET,
timezone=prisma_user.timezone or "not-set",
)
@@ -434,18 +433,6 @@ class OAuthState(BaseModel):
code_verifier: Optional[str] = None
"""Unix timestamp (seconds) indicating when this OAuth state expires"""
scopes: list[str]
# Fields for external API OAuth flows
callback_url: Optional[str] = None
"""External app's callback URL for OAuth redirect"""
state_metadata: dict[str, Any] = Field(default_factory=dict)
"""Metadata to echo back to external app on completion"""
initiated_by_api_key_id: Optional[str] = None
"""ID of the API key that initiated this OAuth flow"""
@property
def is_external(self) -> bool:
"""Whether this OAuth flow was initiated via external API."""
return self.callback_url is not None
class UserMetadata(BaseModel):

View File

@@ -28,15 +28,9 @@ from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_graph_settings,
get_node,
validate_graph_execution_permissions,
)
from backend.data.human_review import (
get_or_create_human_review,
has_pending_reviews_for_graph_exec,
update_review_processed_status,
)
from backend.data.notifications import (
clear_all_user_notification_batches,
create_or_add_to_user_notification_batch,
@@ -151,7 +145,6 @@ class DatabaseManager(AppService):
get_graph = _(get_graph)
get_connected_output_nodes = _(get_connected_output_nodes)
get_graph_metadata = _(get_graph_metadata)
get_graph_settings = _(get_graph_settings)
# Credits
spend_credits = _(_spend_credits, name="spend_credits")
@@ -168,11 +161,6 @@ class DatabaseManager(AppService):
get_user_email_verification = _(get_user_email_verification)
get_user_notification_preference = _(get_user_notification_preference)
# Human In The Loop
get_or_create_human_review = _(get_or_create_human_review)
has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec)
update_review_processed_status = _(update_review_processed_status)
# Notifications - async
clear_all_user_notification_batches = _(clear_all_user_notification_batches)
create_or_add_to_user_notification_batch = _(
@@ -227,9 +215,6 @@ class DatabaseManagerClient(AppServiceClient):
# Block error monitoring
get_block_error_stats = _(d.get_block_error_stats)
# Human In The Loop
has_pending_reviews_for_graph_exec = _(d.has_pending_reviews_for_graph_exec)
# User Emails
get_user_email_by_id = _(d.get_user_email_by_id)
@@ -256,7 +241,6 @@ class DatabaseManagerAsyncClient(AppServiceClient):
get_latest_node_execution = d.get_latest_node_execution
get_graph = d.get_graph
get_graph_metadata = d.get_graph_metadata
get_graph_settings = d.get_graph_settings
get_graph_execution_meta = d.get_graph_execution_meta
get_node = d.get_node
get_node_execution = d.get_node_execution
@@ -272,10 +256,6 @@ class DatabaseManagerAsyncClient(AppServiceClient):
get_execution_kv_data = d.get_execution_kv_data
set_execution_kv_data = d.set_execution_kv_data
# Human In The Loop
get_or_create_human_review = d.get_or_create_human_review
update_review_processed_status = d.update_review_processed_status
# User Comms
get_active_user_ids_in_timerange = d.get_active_user_ids_in_timerange
get_user_email_by_id = d.get_user_email_by_id

View File

@@ -29,7 +29,6 @@ from backend.data.block import (
from backend.data.credit import UsageTransactionMetadata
from backend.data.dynamic_fields import parse_execution_output
from backend.data.execution import (
ExecutionContext,
ExecutionQueue,
ExecutionStatus,
GraphExecution,
@@ -37,6 +36,7 @@ from backend.data.execution import (
NodeExecutionEntry,
NodeExecutionResult,
NodesInputMasks,
UserContext,
)
from backend.data.graph import Link, Node
from backend.data.model import GraphExecutionStats, NodeExecutionStats
@@ -164,11 +164,9 @@ async def execute_node(
user_id = data.user_id
graph_exec_id = data.graph_exec_id
graph_id = data.graph_id
graph_version = data.graph_version
node_exec_id = data.node_exec_id
node_id = data.node_id
node_block = node.block
execution_context = data.execution_context
log_metadata = LogMetadata(
logger=_logger,
@@ -206,65 +204,28 @@ async def execute_node(
# Inject extra execution arguments for the blocks via kwargs
extra_exec_kwargs: dict = {
"graph_id": graph_id,
"graph_version": graph_version,
"node_id": node_id,
"graph_exec_id": graph_exec_id,
"node_exec_id": node_exec_id,
"user_id": user_id,
"execution_context": execution_context,
}
# Add user context from NodeExecutionEntry
extra_exec_kwargs["user_context"] = data.user_context
# Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
# changes during execution. ⚠️ This means a set of credentials can only be used by
# one (running) block at a time; simultaneous execution of blocks using same
# credentials is not supported.
creds_locks: list[AsyncRedisLock] = []
creds_lock = None
input_model = cast(type[BlockSchema], node_block.input_schema)
# Handle regular credentials fields
for field_name, input_type in input_model.get_credentials_fields().items():
credentials_meta = input_type(**input_data[field_name])
credentials, lock = await creds_manager.acquire(user_id, credentials_meta.id)
creds_locks.append(lock)
credentials, creds_lock = await creds_manager.acquire(
user_id, credentials_meta.id
)
extra_exec_kwargs[field_name] = credentials
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
creds_locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
# Credential was deleted or doesn't exist
raise ValueError(
f"Authentication expired for '{file_name}' in field '{field_name}'. "
f"The saved {provider.capitalize()} credentials no longer exist. "
f"Please re-select the file to re-authenticate."
)
# else: _credentials_id is explicitly None, skip credentials (for chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field '{field_name}'. "
f"Please re-select the file to authenticate with {provider.capitalize()}."
)
output_size = 0
# Sentry tag tracking to get per-block user counts, because isolation scopes don't capture them :(
@@ -280,8 +241,8 @@ async def execute_node(
scope.set_tag("node_id", node_id)
scope.set_tag("block_name", node_block.name)
scope.set_tag("block_id", node_block.id)
for k, v in execution_context.model_dump().items():
scope.set_tag(f"execution_context.{k}", v)
for k, v in (data.user_context or UserContext(timezone="UTC")).model_dump().items():
scope.set_tag(f"user_context.{k}", v)
try:
async for output_name, output_data in node_block.execute(
@@ -298,17 +259,12 @@ async def execute_node(
# Re-raise to maintain normal error flow
raise
finally:
# Ensure all credentials are released even if execution fails
for creds_lock in creds_locks:
if (
creds_lock
and (await creds_lock.locked())
and (await creds_lock.owned())
):
try:
await creds_lock.release()
except Exception as e:
log_metadata.error(f"Failed to release credentials lock: {e}")
# Ensure credentials are released even if execution fails
if creds_lock and (await creds_lock.locked()) and (await creds_lock.owned()):
try:
await creds_lock.release()
except Exception as e:
log_metadata.error(f"Failed to release credentials lock: {e}")
# Update execution stats
if execution_stats is not None:
@@ -328,10 +284,9 @@ async def _enqueue_next_nodes(
user_id: str,
graph_exec_id: str,
graph_id: str,
graph_version: int,
log_metadata: LogMetadata,
nodes_input_masks: Optional[NodesInputMasks],
execution_context: ExecutionContext,
user_context: UserContext,
) -> list[NodeExecutionEntry]:
async def add_enqueued_execution(
node_exec_id: str, node_id: str, block_id: str, data: BlockInput
@@ -346,12 +301,11 @@ async def _enqueue_next_nodes(
user_id=user_id,
graph_exec_id=graph_exec_id,
graph_id=graph_id,
graph_version=graph_version,
node_exec_id=node_exec_id,
node_id=node_id,
block_id=block_id,
inputs=data,
execution_context=execution_context,
user_context=user_context,
)
async def register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:
@@ -380,14 +334,17 @@ async def _enqueue_next_nodes(
# Or the same input to be consumed multiple times.
async with synchronized(f"upsert_input-{next_node_id}-{graph_exec_id}"):
# Add output data to the earliest incomplete execution, or create a new one.
next_node_exec, next_node_input = await db_client.upsert_execution_input(
next_node_exec_id, next_node_input = await db_client.upsert_execution_input(
node_id=next_node_id,
graph_exec_id=graph_exec_id,
input_name=next_input_name,
input_data=next_data,
)
next_node_exec_id = next_node_exec.node_exec_id
await send_async_execution_update(next_node_exec)
await async_update_node_execution_status(
db_client=db_client,
exec_id=next_node_exec_id,
status=ExecutionStatus.INCOMPLETE,
)
# Complete missing static input pins data using the last execution input.
static_link_names = {
@@ -703,16 +660,6 @@ class ExecutionProcessor:
log_metadata.info(
f"⚙️ Graph execution #{graph_exec.graph_exec_id} is already running, continuing where it left off."
)
elif exec_meta.status == ExecutionStatus.REVIEW:
exec_meta.status = ExecutionStatus.RUNNING
log_metadata.info(
f"⚙️ Graph execution #{graph_exec.graph_exec_id} was waiting for review, resuming execution."
)
update_graph_execution_state(
db_client=db_client,
graph_exec_id=graph_exec.graph_exec_id,
status=ExecutionStatus.RUNNING,
)
elif exec_meta.status == ExecutionStatus.FAILED:
exec_meta.status = ExecutionStatus.RUNNING
log_metadata.info(
@@ -750,21 +697,19 @@ class ExecutionProcessor:
raise status
exec_meta.status = status
if status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
activity_response = asyncio.run_coroutine_threadsafe(
generate_activity_status_for_execution(
graph_exec_id=graph_exec.graph_exec_id,
graph_id=graph_exec.graph_id,
graph_version=graph_exec.graph_version,
execution_stats=exec_stats,
db_client=get_db_async_client(),
user_id=graph_exec.user_id,
execution_status=status,
),
self.node_execution_loop,
).result(timeout=60.0)
else:
activity_response = None
# Activity status handling
activity_response = asyncio.run_coroutine_threadsafe(
generate_activity_status_for_execution(
graph_exec_id=graph_exec.graph_exec_id,
graph_id=graph_exec.graph_id,
graph_version=graph_exec.graph_version,
execution_stats=exec_stats,
db_client=get_db_async_client(),
user_id=graph_exec.user_id,
execution_status=status,
),
self.node_execution_loop,
).result(timeout=60.0)
if activity_response is not None:
exec_stats.activity_status = activity_response["activity_status"]
exec_stats.correctness_score = activity_response["correctness_score"]
@@ -900,18 +845,14 @@ class ExecutionProcessor:
ExecutionStatus.RUNNING,
ExecutionStatus.QUEUED,
ExecutionStatus.TERMINATED,
ExecutionStatus.REVIEW,
],
):
node_entry = node_exec.to_node_execution_entry(
graph_exec.execution_context
)
node_entry = node_exec.to_node_execution_entry(graph_exec.user_context)
execution_queue.add(node_entry)
# ------------------------------------------------------------
# Main dispatch / polling loop -----------------------------
# ------------------------------------------------------------
while not execution_queue.empty():
if cancel.is_set():
break
@@ -1065,12 +1006,7 @@ class ExecutionProcessor:
elif error is not None:
execution_status = ExecutionStatus.FAILED
else:
if db_client.has_pending_reviews_for_graph_exec(
graph_exec.graph_exec_id
):
execution_status = ExecutionStatus.REVIEW
else:
execution_status = ExecutionStatus.COMPLETED
execution_status = ExecutionStatus.COMPLETED
if error:
execution_stats.error = str(error) or type(error).__name__
@@ -1206,10 +1142,9 @@ class ExecutionProcessor:
user_id=graph_exec.user_id,
graph_exec_id=graph_exec.graph_exec_id,
graph_id=graph_exec.graph_id,
graph_version=graph_exec.graph_version,
log_metadata=log_metadata,
nodes_input_masks=nodes_input_masks,
execution_context=graph_exec.execution_context,
user_context=graph_exec.user_context,
):
execution_queue.add(next_execution)
@@ -1599,32 +1534,36 @@ class ExecutionManager(AppProcess):
graph_exec_id = graph_exec_entry.graph_exec_id
user_id = graph_exec_entry.user_id
graph_id = graph_exec_entry.graph_id
root_exec_id = graph_exec_entry.execution_context.root_execution_id
parent_exec_id = graph_exec_entry.execution_context.parent_execution_id
parent_graph_exec_id = graph_exec_entry.parent_graph_exec_id
logger.info(
f"[{self.service_name}] Received RUN for graph_exec_id={graph_exec_id}, user_id={user_id}, executor_id={self.executor_id}"
+ (f", root={root_exec_id}" if root_exec_id else "")
+ (f", parent={parent_exec_id}" if parent_exec_id else "")
+ (f", parent={parent_graph_exec_id}" if parent_graph_exec_id else "")
)
# Check if root execution is already terminated (prevents orphaned child executions)
if root_exec_id and root_exec_id != graph_exec_id:
parent_exec = get_db_client().get_graph_execution_meta(
execution_id=root_exec_id,
user_id=user_id,
)
if parent_exec and parent_exec.status == ExecutionStatus.TERMINATED:
logger.info(
f"[{self.service_name}] Skipping execution {graph_exec_id} - parent {root_exec_id} is TERMINATED"
# Check if parent execution is already terminated (prevents orphaned child executions)
if parent_graph_exec_id:
try:
parent_exec = get_db_client().get_graph_execution_meta(
execution_id=parent_graph_exec_id,
user_id=user_id,
)
# Mark this child as terminated since parent was stopped
get_db_client().update_graph_execution_stats(
graph_exec_id=graph_exec_id,
status=ExecutionStatus.TERMINATED,
if parent_exec and parent_exec.status == ExecutionStatus.TERMINATED:
logger.info(
f"[{self.service_name}] Skipping execution {graph_exec_id} - parent {parent_graph_exec_id} is TERMINATED"
)
# Mark this child as terminated since parent was stopped
get_db_client().update_graph_execution_stats(
graph_exec_id=graph_exec_id,
status=ExecutionStatus.TERMINATED,
)
_ack_message(reject=False, requeue=False)
return
except Exception as e:
logger.warning(
f"[{self.service_name}] Could not check parent status for {graph_exec_id}: {e}"
)
_ack_message(reject=False, requeue=False)
return
# Continue execution if parent check fails (don't block on errors)
# Check user rate limit before processing
try:

View File

@@ -10,7 +10,6 @@ from pydantic import BaseModel, JsonValue, ValidationError
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data import user as user_db
from backend.data.block import (
Block,
BlockCostType,
@@ -25,17 +24,18 @@ from backend.data.db import prisma
# Import dynamic field utilities from centralized location
from backend.data.dynamic_fields import merge_execution_input
from backend.data.execution import (
ExecutionContext,
ExecutionStatus,
GraphExecutionMeta,
GraphExecutionStats,
GraphExecutionWithNodes,
NodesInputMasks,
get_graph_execution,
UserContext,
)
from backend.data.graph import GraphModel, Node
from backend.data.model import USER_TIMEZONE_NOT_SET, CredentialsMetaInput
from backend.data.model import CredentialsMetaInput
from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig
from backend.data.user import get_user_by_id
from backend.util.cache import cached
from backend.util.clients import (
get_async_execution_event_bus,
get_async_execution_queue,
@@ -51,6 +51,32 @@ from backend.util.logging import TruncatedLogger, is_structured_logging_enabled
from backend.util.settings import Config
from backend.util.type import convert
@cached(maxsize=1000, ttl_seconds=3600)
async def get_user_context(user_id: str) -> UserContext:
"""
Get the UserContext for a user; always returns a valid context with a timezone.
Defaults to UTC if the user has no timezone set.
"""
user_context = UserContext(timezone="UTC") # Default to UTC
try:
if prisma.is_connected():
user = await get_user_by_id(user_id)
else:
user = await get_database_manager_async_client().get_user_by_id(user_id)
if user and user.timezone and user.timezone != "not-set":
user_context.timezone = user.timezone
logger.debug(f"Retrieved user context: timezone={user.timezone}")
else:
logger.debug("User has no timezone set, using UTC")
except Exception as e:
logger.warning(f"Could not fetch user timezone: {e}")
# Continue with UTC as default
return user_context
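A sketch of the resulting call-site behavior, assuming the @cached decorator keys on the user_id argument: repeated lookups within the one-hour TTL are served from memory instead of hitting the database:
# Inside an async context; the user ID is hypothetical.
ctx = await get_user_context("user-123")         # first call: DB lookup
ctx_cached = await get_user_context("user-123")  # within TTL: cached result
assert ctx.timezone == ctx_cached.timezone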
config = Config()
logger = TruncatedLogger(logging.getLogger(__name__), prefix="[GraphExecutorUtil]")
@@ -468,6 +494,7 @@ async def validate_and_construct_node_execution_input(
graph_version: The version of the graph to use.
graph_credentials_inputs: Credentials inputs to use.
nodes_input_masks: Node inputs to use.
is_sub_graph: Whether this is a sub-graph execution.
Returns:
GraphModel: Full graph object for the given `graph_id`.
@@ -735,8 +762,8 @@ async def add_graph_execution(
graph_version: Optional[int] = None,
graph_credentials_inputs: Optional[Mapping[str, CredentialsMetaInput]] = None,
nodes_input_masks: Optional[NodesInputMasks] = None,
execution_context: Optional[ExecutionContext] = None,
graph_exec_id: Optional[str] = None,
parent_graph_exec_id: Optional[str] = None,
is_sub_graph: bool = False,
) -> GraphExecutionWithNodes:
"""
Adds a graph execution to the queue and returns the execution entry.
@@ -751,54 +778,33 @@ async def add_graph_execution(
Keys should map to the keys generated by `GraphModel.aggregate_credentials_inputs`.
nodes_input_masks: Node inputs to use in the execution.
parent_graph_exec_id: The ID of the parent graph execution (for nested executions).
graph_exec_id: If provided, resume this existing execution instead of creating a new one.
is_sub_graph: Whether this is a sub-graph execution.
Returns:
GraphExecutionEntry: The entry for the graph execution.
Raises:
ValueError: If the graph is not found or if there are validation errors.
NotFoundError: If graph_exec_id is provided but execution is not found.
"""
if prisma.is_connected():
edb = execution_db
udb = user_db
gdb = graph_db
else:
edb = udb = gdb = get_database_manager_async_client()
edb = get_database_manager_async_client()
# Get or create the graph execution
if graph_exec_id:
# Resume existing execution
graph_exec = await get_graph_execution(
graph, starting_nodes_input, compiled_nodes_input_masks = (
await validate_and_construct_node_execution_input(
graph_id=graph_id,
user_id=user_id,
execution_id=graph_exec_id,
include_node_executions=True,
)
if not graph_exec:
raise NotFoundError(f"Graph execution #{graph_exec_id} not found.")
# Use existing execution's compiled input masks
compiled_nodes_input_masks = graph_exec.nodes_input_masks or {}
logger.info(f"Resuming graph execution #{graph_exec.id} for graph #{graph_id}")
else:
parent_exec_id = (
execution_context.parent_execution_id if execution_context else None
)
# Create new execution
graph, starting_nodes_input, compiled_nodes_input_masks = (
await validate_and_construct_node_execution_input(
graph_id=graph_id,
user_id=user_id,
graph_inputs=inputs or {},
graph_version=graph_version,
graph_credentials_inputs=graph_credentials_inputs,
nodes_input_masks=nodes_input_masks,
is_sub_graph=parent_exec_id is not None,
)
graph_inputs=inputs or {},
graph_version=graph_version,
graph_credentials_inputs=graph_credentials_inputs,
nodes_input_masks=nodes_input_masks,
is_sub_graph=is_sub_graph,
)
)
graph_exec = None
try:
# Sanity check: running add_graph_execution with the properties of
# the graph_exec created here should create the same execution again.
graph_exec = await edb.create_graph_execution(
user_id=user_id,
graph_id=graph_id,
@@ -808,38 +814,20 @@ async def add_graph_execution(
nodes_input_masks=nodes_input_masks,
starting_nodes_input=starting_nodes_input,
preset_id=preset_id,
parent_graph_exec_id=parent_exec_id,
parent_graph_exec_id=parent_graph_exec_id,
)
graph_exec_entry = graph_exec.to_graph_execution_entry(
user_context=await get_user_context(user_id),
compiled_nodes_input_masks=compiled_nodes_input_masks,
parent_graph_exec_id=parent_graph_exec_id,
)
logger.info(
f"Created graph execution #{graph_exec.id} for graph "
f"#{graph_id} with {len(starting_nodes_input)} starting nodes"
f"#{graph_id} with {len(starting_nodes_input)} starting nodes. "
f"Now publishing to execution queue."
)
# Generate execution context if it's not provided
if execution_context is None:
user = await udb.get_user_by_id(user_id)
settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id)
execution_context = ExecutionContext(
safe_mode=(
settings.human_in_the_loop_safe_mode
if settings.human_in_the_loop_safe_mode is not None
else True
),
user_timezone=(
user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
),
root_execution_id=graph_exec.id,
)
try:
graph_exec_entry = graph_exec.to_graph_execution_entry(
compiled_nodes_input_masks=compiled_nodes_input_masks,
execution_context=execution_context,
)
logger.info(f"Publishing execution {graph_exec.id} to execution queue")
exec_queue = await get_async_execution_queue()
await exec_queue.publish_message(
routing_key=GRAPH_EXECUTION_ROUTING_KEY,
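With this change a nested run is expressed via parent_graph_exec_id and is_sub_graph rather than a full ExecutionContext; a hedged sketch of the new call shape, with hypothetical IDs and inputs:
# Inside an async context; spawning a child execution under a parent run.
child_exec = await add_graph_execution(
    graph_id="graph-xyz",
    user_id="user-123",
    inputs={"query": "hello"},
    parent_graph_exec_id="exec-parent-1",  # lets the executor skip work if the parent was terminated
    is_sub_graph=True,
)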

View File

@@ -348,6 +348,9 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
mock_graph_exec.node_executions = [] # Add this to avoid AttributeError
mock_graph_exec.to_graph_execution_entry.return_value = mocker.MagicMock()
# Mock user context
mock_user_context = {"user_id": user_id, "context": "test_context"}
# Mock the queue and event bus
mock_queue = mocker.AsyncMock()
mock_event_bus = mocker.MagicMock()
@@ -359,8 +362,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
)
mock_edb = mocker.patch("backend.executor.utils.execution_db")
mock_prisma = mocker.patch("backend.executor.utils.prisma")
mock_udb = mocker.patch("backend.executor.utils.user_db")
mock_gdb = mocker.patch("backend.executor.utils.graph_db")
mock_get_user_context = mocker.patch("backend.executor.utils.get_user_context")
mock_get_queue = mocker.patch("backend.executor.utils.get_async_execution_queue")
mock_get_event_bus = mocker.patch(
"backend.executor.utils.get_async_execution_event_bus"
@@ -378,14 +380,7 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
return_value=mock_graph_exec
)
mock_edb.update_node_execution_status_batch = mocker.AsyncMock()
# Mock user and settings data
mock_user = mocker.MagicMock()
mock_user.timezone = "UTC"
mock_settings = mocker.MagicMock()
mock_settings.human_in_the_loop_safe_mode = True
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
mock_get_user_context.return_value = mock_user_context
mock_get_queue.return_value = mock_queue
mock_get_event_bus.return_value = mock_event_bus

View File

@@ -15,7 +15,6 @@ from backend.data.model import (
OAuth2Credentials,
OAuthState,
UserIntegrations,
UserPasswordCredentials,
)
from backend.data.redis_client import get_redis_async
from backend.util.settings import Settings
@@ -208,14 +207,6 @@ v0_credentials = APIKeyCredentials(
expires_at=None,
)
webshare_proxy_credentials = UserPasswordCredentials(
id="a5b3c7d9-2e4f-4a6b-8c1d-9e0f1a2b3c4d",
provider="webshare_proxy",
username=SecretStr(settings.secrets.webshare_proxy_username),
password=SecretStr(settings.secrets.webshare_proxy_password),
title="Use Credits for Webshare Proxy",
)
DEFAULT_CREDENTIALS = [
ollama_credentials,
revid_credentials,
@@ -242,7 +233,6 @@ DEFAULT_CREDENTIALS = [
google_maps_credentials,
llama_api_credentials,
v0_credentials,
webshare_proxy_credentials,
]
@@ -331,11 +321,6 @@ class IntegrationCredentialsStore:
all_credentials.append(zerobounce_credentials)
if settings.secrets.google_maps_api_key:
all_credentials.append(google_maps_credentials)
if (
settings.secrets.webshare_proxy_username
and settings.secrets.webshare_proxy_password
):
all_credentials.append(webshare_proxy_credentials)
return all_credentials
async def get_creds_by_id(
@@ -414,15 +399,7 @@ class IntegrationCredentialsStore:
# ===================== OAUTH STATES ===================== #
async def store_state_token(
self,
user_id: str,
provider: str,
scopes: list[str],
use_pkce: bool = False,
# New parameters for external API OAuth flows
callback_url: Optional[str] = None,
state_metadata: Optional[dict] = None,
initiated_by_api_key_id: Optional[str] = None,
self, user_id: str, provider: str, scopes: list[str], use_pkce: bool = False
) -> tuple[str, str]:
token = secrets.token_urlsafe(32)
expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)
@@ -435,10 +412,6 @@ class IntegrationCredentialsStore:
code_verifier=code_verifier,
expires_at=int(expires_at.timestamp()),
scopes=scopes,
# External API OAuth flow fields
callback_url=callback_url,
state_metadata=state_metadata or {},
initiated_by_api_key_id=initiated_by_api_key_id,
)
async with self.edit_user_integrations(user_id) as user_integrations:

View File

@@ -1,156 +0,0 @@
"""
Embedding service for generating text embeddings using OpenAI.
Used for vector-based semantic search in the store.
"""
import logging
from typing import Optional
import openai
from backend.util.settings import Settings
logger = logging.getLogger(__name__)
# Model configuration
# Using text-embedding-3-small (1536 dimensions) for compatibility with pgvector indexes
# pgvector IVFFlat/HNSW indexes have dimension limits (2000 for IVFFlat, varies for HNSW)
EMBEDDING_MODEL = "text-embedding-3-small"
EMBEDDING_DIMENSIONS = 1536
# Input validation limits
# OpenAI text-embedding-3-small supports up to 8191 tokens (~32k chars)
# We set a conservative limit to prevent abuse
MAX_TEXT_LENGTH = 10000 # characters
MAX_BATCH_SIZE = 100 # maximum texts per batch request
class EmbeddingService:
"""Service for generating text embeddings using OpenAI."""
def __init__(self, api_key: Optional[str] = None):
settings = Settings()
self.api_key = (
api_key
or settings.secrets.openai_internal_api_key
or settings.secrets.openai_api_key
)
if not self.api_key:
raise ValueError(
"OpenAI API key not configured. "
"Set OPENAI_API_KEY or OPENAI_INTERNAL_API_KEY environment variable."
)
self.client = openai.AsyncOpenAI(api_key=self.api_key)
async def generate_embedding(self, text: str) -> list[float]:
"""
Generate embedding for a single text string.
Args:
text: The text to generate an embedding for.
Returns:
A list of floats representing the embedding vector.
Raises:
ValueError: If the text is empty or exceeds maximum length.
openai.APIError: If the OpenAI API call fails.
"""
# Input validation
if not text or not text.strip():
raise ValueError("Text cannot be empty")
if len(text) > MAX_TEXT_LENGTH:
raise ValueError(
f"Text exceeds maximum length of {MAX_TEXT_LENGTH} characters"
)
try:
response = await self.client.embeddings.create(
model=EMBEDDING_MODEL,
input=text,
dimensions=EMBEDDING_DIMENSIONS,
)
return response.data[0].embedding
except openai.APIError as e:
logger.error(f"OpenAI API error generating embedding: {e}")
raise
async def generate_embeddings(self, texts: list[str]) -> list[list[float]]:
"""
Generate embeddings for multiple texts (batch).
Args:
texts: List of texts to generate embeddings for.
Returns:
List of embedding vectors, one per input text.
Raises:
ValueError: If any text is invalid or batch size exceeds limit.
openai.APIError: If the OpenAI API call fails.
"""
# Input validation
if not texts:
raise ValueError("Texts list cannot be empty")
if len(texts) > MAX_BATCH_SIZE:
raise ValueError(f"Batch size exceeds maximum of {MAX_BATCH_SIZE} texts")
for i, text in enumerate(texts):
if not text or not text.strip():
raise ValueError(f"Text at index {i} cannot be empty")
if len(text) > MAX_TEXT_LENGTH:
raise ValueError(
f"Text at index {i} exceeds maximum length of {MAX_TEXT_LENGTH} characters"
)
try:
response = await self.client.embeddings.create(
model=EMBEDDING_MODEL,
input=texts,
dimensions=EMBEDDING_DIMENSIONS,
)
# Sort by index to ensure correct ordering
sorted_data = sorted(response.data, key=lambda x: x.index)
return [item.embedding for item in sorted_data]
except openai.APIError as e:
logger.error(f"OpenAI API error generating embeddings: {e}")
raise
def create_search_text(name: str, sub_heading: str, description: str) -> str:
"""
Combine fields into searchable text for embedding.
This creates a single text string from the agent's name, sub-heading,
and description, which is then converted to an embedding vector.
Args:
name: The agent name.
sub_heading: The agent sub-heading/tagline.
description: The agent description.
Returns:
A single string combining all non-empty fields.
"""
parts = [name or "", sub_heading or "", description or ""]
return " ".join(filter(None, parts)).strip()
# Singleton instance
_embedding_service: Optional[EmbeddingService] = None
async def get_embedding_service() -> EmbeddingService:
"""
Get or create the embedding service singleton.
Returns:
The shared EmbeddingService instance.
Raises:
ValueError: If OpenAI API key is not configured.
"""
global _embedding_service
if _embedding_service is None:
_embedding_service = EmbeddingService()
return _embedding_service
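For reference, a sketch of how this (now removed) service was consumed, per its own docstrings; the listing fields below are hypothetical:
# Inside an async context; field values are made up for illustration.
service = await get_embedding_service()
text = create_search_text(
    name="Example Agent",
    sub_heading="Does one thing well",
    description="A short description used for semantic search.",
)
vector = await service.generate_embedding(text)
assert len(vector) == EMBEDDING_DIMENSIONS  # 1536, matching the pgvector indexes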

View File

@@ -1,231 +0,0 @@
"""Tests for the embedding service.
This module tests:
- create_search_text utility function
- EmbeddingService input validation
- EmbeddingService API interaction (mocked)
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.integrations.embeddings import (
EMBEDDING_DIMENSIONS,
MAX_BATCH_SIZE,
MAX_TEXT_LENGTH,
EmbeddingService,
create_search_text,
)
class TestCreateSearchText:
"""Tests for the create_search_text utility function."""
def test_combines_all_fields(self):
result = create_search_text("Agent Name", "A cool agent", "Does amazing things")
assert result == "Agent Name A cool agent Does amazing things"
def test_handles_empty_name(self):
result = create_search_text("", "Sub heading", "Description")
assert result == "Sub heading Description"
def test_handles_empty_sub_heading(self):
result = create_search_text("Name", "", "Description")
assert result == "Name Description"
def test_handles_empty_description(self):
result = create_search_text("Name", "Sub heading", "")
assert result == "Name Sub heading"
def test_handles_all_empty(self):
result = create_search_text("", "", "")
assert result == ""
def test_handles_none_values(self):
# The function expects strings but should handle None gracefully
result = create_search_text(None, None, None) # type: ignore
assert result == ""
def test_preserves_content_strips_outer_whitespace(self):
# The function joins parts and strips the outer result
# Internal whitespace in each part is preserved
result = create_search_text(" Name ", " Sub ", " Desc ")
# Each part is joined with a single space, then the outer result is stripped;
# the triple spaces come from each part's own leading/trailing padding.
assert result == "Name   Sub   Desc"
def test_handles_only_whitespace(self):
# Parts that are only whitespace become empty after filter
result = create_search_text(" ", " ", " ")
assert result == ""
class TestEmbeddingServiceValidation:
"""Tests for EmbeddingService input validation."""
@pytest.fixture
def mock_settings(self):
"""Mock settings with a test API key."""
with patch("backend.integrations.embeddings.Settings") as mock:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = "test-api-key"
mock_instance.secrets.openai_api_key = ""
mock.return_value = mock_instance
yield mock
@pytest.fixture
def service(self, mock_settings):
"""Create an EmbeddingService instance with mocked settings."""
with patch("backend.integrations.embeddings.openai.AsyncOpenAI"):
return EmbeddingService()
def test_init_requires_api_key(self):
"""Test that initialization fails without an API key."""
with patch("backend.integrations.embeddings.Settings") as mock:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = ""
mock_instance.secrets.openai_api_key = ""
mock.return_value = mock_instance
with pytest.raises(ValueError, match="OpenAI API key not configured"):
EmbeddingService()
def test_init_accepts_explicit_api_key(self):
"""Test that explicit API key overrides settings."""
with patch("backend.integrations.embeddings.Settings") as mock:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = ""
mock_instance.secrets.openai_api_key = ""
mock.return_value = mock_instance
with patch("backend.integrations.embeddings.openai.AsyncOpenAI"):
service = EmbeddingService(api_key="explicit-key")
assert service.api_key == "explicit-key"
@pytest.mark.asyncio
async def test_generate_embedding_empty_text(self, service):
"""Test that empty text raises ValueError."""
with pytest.raises(ValueError, match="Text cannot be empty"):
await service.generate_embedding("")
@pytest.mark.asyncio
async def test_generate_embedding_whitespace_only(self, service):
"""Test that whitespace-only text raises ValueError."""
with pytest.raises(ValueError, match="Text cannot be empty"):
await service.generate_embedding(" ")
@pytest.mark.asyncio
async def test_generate_embedding_exceeds_max_length(self, service):
"""Test that text exceeding max length raises ValueError."""
long_text = "a" * (MAX_TEXT_LENGTH + 1)
with pytest.raises(ValueError, match="exceeds maximum length"):
await service.generate_embedding(long_text)
@pytest.mark.asyncio
async def test_generate_embeddings_empty_list(self, service):
"""Test that empty list raises ValueError."""
with pytest.raises(ValueError, match="Texts list cannot be empty"):
await service.generate_embeddings([])
@pytest.mark.asyncio
async def test_generate_embeddings_exceeds_batch_size(self, service):
"""Test that batch exceeding max size raises ValueError."""
texts = ["text"] * (MAX_BATCH_SIZE + 1)
with pytest.raises(ValueError, match="Batch size exceeds maximum"):
await service.generate_embeddings(texts)
@pytest.mark.asyncio
async def test_generate_embeddings_empty_text_in_batch(self, service):
"""Test that empty text in batch raises ValueError with index."""
with pytest.raises(ValueError, match="Text at index 1 cannot be empty"):
await service.generate_embeddings(["valid", "", "also valid"])
@pytest.mark.asyncio
async def test_generate_embeddings_long_text_in_batch(self, service):
"""Test that long text in batch raises ValueError with index."""
long_text = "a" * (MAX_TEXT_LENGTH + 1)
with pytest.raises(ValueError, match="Text at index 2 exceeds maximum length"):
await service.generate_embeddings(["short", "also short", long_text])
class TestEmbeddingServiceAPI:
"""Tests for EmbeddingService API interaction."""
@pytest.fixture
def mock_openai_client(self):
"""Create a mock OpenAI client."""
mock_client = MagicMock()
mock_client.embeddings = MagicMock()
return mock_client
@pytest.fixture
def service_with_mock_client(self, mock_openai_client):
"""Create an EmbeddingService with a mocked OpenAI client."""
with patch("backend.integrations.embeddings.Settings") as mock_settings:
mock_instance = MagicMock()
mock_instance.secrets.openai_internal_api_key = "test-key"
mock_instance.secrets.openai_api_key = ""
mock_settings.return_value = mock_instance
with patch(
"backend.integrations.embeddings.openai.AsyncOpenAI"
) as mock_openai:
mock_openai.return_value = mock_openai_client
service = EmbeddingService()
return service, mock_openai_client
@pytest.mark.asyncio
async def test_generate_embedding_success(self, service_with_mock_client):
"""Test successful embedding generation."""
service, mock_client = service_with_mock_client
# Create mock response
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_response = MagicMock()
mock_response.data = [MagicMock(embedding=mock_embedding)]
mock_client.embeddings.create = AsyncMock(return_value=mock_response)
result = await service.generate_embedding("test text")
assert result == mock_embedding
mock_client.embeddings.create.assert_called_once()
@pytest.mark.asyncio
async def test_generate_embeddings_success(self, service_with_mock_client):
"""Test successful batch embedding generation."""
service, mock_client = service_with_mock_client
# Create mock response with multiple embeddings
mock_embeddings = [[0.1] * EMBEDDING_DIMENSIONS, [0.2] * EMBEDDING_DIMENSIONS]
mock_response = MagicMock()
mock_response.data = [
MagicMock(embedding=mock_embeddings[0], index=0),
MagicMock(embedding=mock_embeddings[1], index=1),
]
mock_client.embeddings.create = AsyncMock(return_value=mock_response)
result = await service.generate_embeddings(["text1", "text2"])
assert result == mock_embeddings
mock_client.embeddings.create.assert_called_once()
@pytest.mark.asyncio
async def test_generate_embeddings_preserves_order(self, service_with_mock_client):
"""Test that batch embeddings are returned in correct order even if API returns out of order."""
service, mock_client = service_with_mock_client
# Create mock response with embeddings out of order
mock_embeddings = [[0.1] * EMBEDDING_DIMENSIONS, [0.2] * EMBEDDING_DIMENSIONS]
mock_response = MagicMock()
# Return in reverse order
mock_response.data = [
MagicMock(embedding=mock_embeddings[1], index=1),
MagicMock(embedding=mock_embeddings[0], index=0),
]
mock_client.embeddings.create = AsyncMock(return_value=mock_response)
result = await service.generate_embeddings(["text1", "text2"])
# Should be sorted by index
assert result[0] == mock_embeddings[0]
assert result[1] == mock_embeddings[1]
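For context, the reordering behavior this test pins down is a one-liner on the service side; a minimal sketch, assuming the OpenAI SDK response shape where each item carries the `index` of its input text:

```
# Minimal sketch of the order-restoring step exercised by the test above.
def _embeddings_in_input_order(response_data):
    # The API may return items out of order; restore input order by the
    # echoed index before returning just the vectors.
    return [item.embedding for item in sorted(response_data, key=lambda d: d.index)]
```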

View File

@@ -49,7 +49,6 @@ class ProviderName(str, Enum):
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
V0 = "v0"
WEBSHARE_PROXY = "webshare_proxy"
ZEROBOUNCE = "zerobounce"
@classmethod

View File

@@ -143,9 +143,6 @@ def instrument_fastapi(
)
# Create instrumentator with default metrics
# Use service-specific inprogress_name to avoid duplicate registration
# when multiple FastAPI apps are instrumented in the same process
service_subsystem = service_name.replace("-", "_")
instrumentator = Instrumentator(
should_group_status_codes=True,
should_ignore_untemplated=True,
@@ -153,7 +150,7 @@ def instrument_fastapi(
should_instrument_requests_inprogress=True,
excluded_handlers=excluded_handlers or ["/health", "/readiness"],
env_var_name="ENABLE_METRICS",
inprogress_name=f"autogpt_{service_subsystem}_http_requests_inprogress",
inprogress_name="autogpt_http_requests_inprogress",
inprogress_labels=True,
)
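This hunk reverts the per-service metric name to a shared one. For reference, the collision the removed comment describes is easy to reproduce with `prometheus_client`'s default registry, which rejects a second collector with the same name (a minimal sketch, not the instrumentator's internals):

```
from prometheus_client import Gauge

Gauge("autogpt_http_requests_inprogress", "In-progress HTTP requests")
try:
    # A second app instrumented with the same inprogress_name effectively
    # re-registers the same collector, which the registry rejects.
    Gauge("autogpt_http_requests_inprogress", "In-progress HTTP requests")
except ValueError as e:
    print(f"duplicate registration: {e}")
```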

View File

@@ -3,8 +3,6 @@ from fastapi import FastAPI
from backend.monitoring.instrumentation import instrument_fastapi
from backend.server.middleware.security import SecurityHeadersMiddleware
from .routes.integrations import integrations_router
from .routes.tools import tools_router
from .routes.v1 import v1_router
external_app = FastAPI(
@@ -16,8 +14,6 @@ external_app = FastAPI(
external_app.add_middleware(SecurityHeadersMiddleware)
external_app.include_router(v1_router, prefix="/v1")
external_app.include_router(tools_router, prefix="/v1")
external_app.include_router(integrations_router, prefix="/v1")
# Add Prometheus instrumentation
instrument_fastapi(

View File

@@ -1,650 +0,0 @@
"""
External API endpoints for integrations and credentials.
This module provides endpoints for external applications (like Autopilot) to:
- Initiate OAuth flows with custom callback URLs
- Complete OAuth flows by exchanging authorization codes
- Create API key, user/password, and host-scoped credentials
- List and manage user credentials
"""
import logging
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Union
from urllib.parse import urlparse
from fastapi import APIRouter, Body, HTTPException, Path, Security, status
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field, SecretStr
from backend.data.api_key import APIKeyInfo
from backend.data.model import (
APIKeyCredentials,
Credentials,
CredentialsType,
HostScopedCredentials,
OAuth2Credentials,
UserPasswordCredentials,
)
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
from backend.integrations.providers import ProviderName
from backend.server.external.middleware import require_permission
from backend.server.integrations.models import get_all_provider_names
from backend.util.settings import Settings
if TYPE_CHECKING:
from backend.integrations.oauth import BaseOAuthHandler
logger = logging.getLogger(__name__)
settings = Settings()
creds_manager = IntegrationCredentialsManager()
integrations_router = APIRouter(prefix="/integrations", tags=["integrations"])
# ==================== Request/Response Models ==================== #
class OAuthInitiateRequest(BaseModel):
"""Request model for initiating an OAuth flow."""
callback_url: str = Field(
..., description="The external app's callback URL for OAuth redirect"
)
scopes: list[str] = Field(
default_factory=list, description="OAuth scopes to request"
)
state_metadata: dict[str, Any] = Field(
default_factory=dict,
description="Arbitrary metadata to echo back on completion",
)
class OAuthInitiateResponse(BaseModel):
"""Response model for OAuth initiation."""
login_url: str = Field(..., description="URL to redirect user for OAuth consent")
state_token: str = Field(..., description="State token for CSRF protection")
expires_at: int = Field(
..., description="Unix timestamp when the state token expires"
)
class OAuthCompleteRequest(BaseModel):
"""Request model for completing an OAuth flow."""
code: str = Field(..., description="Authorization code from OAuth provider")
state_token: str = Field(..., description="State token from initiate request")
class OAuthCompleteResponse(BaseModel):
"""Response model for OAuth completion."""
credentials_id: str = Field(..., description="ID of the stored credentials")
provider: str = Field(..., description="Provider name")
type: str = Field(..., description="Credential type (oauth2)")
title: Optional[str] = Field(None, description="Credential title")
scopes: list[str] = Field(default_factory=list, description="Granted scopes")
username: Optional[str] = Field(None, description="Username from provider")
state_metadata: dict[str, Any] = Field(
default_factory=dict, description="Echoed metadata from initiate request"
)
class CredentialSummary(BaseModel):
"""Summary of a credential without sensitive data."""
id: str
provider: str
type: CredentialsType
title: Optional[str] = None
scopes: Optional[list[str]] = None
username: Optional[str] = None
host: Optional[str] = None
class ProviderInfo(BaseModel):
"""Information about an integration provider."""
name: str
supports_oauth: bool = False
supports_api_key: bool = False
supports_user_password: bool = False
supports_host_scoped: bool = False
default_scopes: list[str] = Field(default_factory=list)
# ==================== Credential Creation Models ==================== #
class CreateAPIKeyCredentialRequest(BaseModel):
"""Request model for creating API key credentials."""
type: Literal["api_key"] = "api_key"
api_key: str = Field(..., description="The API key")
title: str = Field(..., description="A name for this credential")
expires_at: Optional[int] = Field(
None, description="Unix timestamp when the API key expires"
)
class CreateUserPasswordCredentialRequest(BaseModel):
"""Request model for creating username/password credentials."""
type: Literal["user_password"] = "user_password"
username: str = Field(..., description="Username")
password: str = Field(..., description="Password")
title: str = Field(..., description="A name for this credential")
class CreateHostScopedCredentialRequest(BaseModel):
"""Request model for creating host-scoped credentials."""
type: Literal["host_scoped"] = "host_scoped"
host: str = Field(..., description="Host/domain pattern to match")
headers: dict[str, str] = Field(..., description="Headers to include in requests")
title: str = Field(..., description="A name for this credential")
# Union type for credential creation
CreateCredentialRequest = Annotated[
CreateAPIKeyCredentialRequest
| CreateUserPasswordCredentialRequest
| CreateHostScopedCredentialRequest,
Field(discriminator="type"),
]
class CreateCredentialResponse(BaseModel):
"""Response model for credential creation."""
id: str
provider: str
type: CredentialsType
title: Optional[str] = None
# ==================== Helper Functions ==================== #
def validate_callback_url(callback_url: str) -> bool:
"""Validate that the callback URL is from an allowed origin."""
allowed_origins = settings.config.external_oauth_callback_origins
try:
parsed = urlparse(callback_url)
callback_origin = f"{parsed.scheme}://{parsed.netloc}"
for allowed in allowed_origins:
# Simple origin matching
if callback_origin == allowed:
return True
# Allow localhost with any port in development (proper hostname check)
if parsed.hostname == "localhost":
for allowed in allowed_origins:
allowed_parsed = urlparse(allowed)
if allowed_parsed.hostname == "localhost":
return True
return False
except Exception:
return False
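A standalone sketch of the origin check above, with hypothetical allowed origins, to make the matching rules concrete:

```
# Minimal standalone sketch of the origin check (origins are hypothetical).
from urllib.parse import urlparse

ALLOWED = ["https://app.example.com", "http://localhost:3000"]

def origin_allowed(callback_url: str) -> bool:
    p = urlparse(callback_url)
    origin = f"{p.scheme}://{p.netloc}"
    if origin in ALLOWED:
        return True
    # localhost is accepted on any port when any allowed origin is localhost
    return p.hostname == "localhost" and any(
        urlparse(a).hostname == "localhost" for a in ALLOWED
    )

assert origin_allowed("https://app.example.com/oauth/done")
assert origin_allowed("http://localhost:8080/cb")
assert not origin_allowed("https://evil.example.net/cb")
```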
def _get_oauth_handler_for_external(
provider_name: str, redirect_uri: str
) -> "BaseOAuthHandler":
"""Get an OAuth handler configured with an external redirect URI."""
# Ensure blocks are loaded so SDK providers are available
try:
from backend.blocks import load_all_blocks
load_all_blocks()
except Exception as e:
logger.warning(f"Failed to load blocks: {e}")
if provider_name not in HANDLERS_BY_NAME:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Provider '{provider_name}' does not support OAuth",
)
# Check if this provider has custom OAuth credentials
oauth_credentials = CREDENTIALS_BY_PROVIDER.get(provider_name)
if oauth_credentials and not oauth_credentials.use_secrets:
import os
client_id = (
os.getenv(oauth_credentials.client_id_env_var)
if oauth_credentials.client_id_env_var
else None
)
client_secret = (
os.getenv(oauth_credentials.client_secret_env_var)
if oauth_credentials.client_secret_env_var
else None
)
else:
client_id = getattr(settings.secrets, f"{provider_name}_client_id", None)
client_secret = getattr(
settings.secrets, f"{provider_name}_client_secret", None
)
if not (client_id and client_secret):
logger.error(f"Attempt to use unconfigured {provider_name} OAuth integration")
raise HTTPException(
status_code=status.HTTP_501_NOT_IMPLEMENTED,
detail={
"message": f"Integration with provider '{provider_name}' is not configured.",
"hint": "Set client ID and secret in the application's deployment environment",
},
)
handler_class = HANDLERS_BY_NAME[provider_name]
return handler_class(
client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri,
)
# ==================== Endpoints ==================== #
@integrations_router.get("/providers", response_model=list[ProviderInfo])
async def list_providers(
api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS)
),
) -> list[ProviderInfo]:
"""
List all available integration providers.
Returns a list of all providers with their supported credential types.
Most providers support API key credentials, and some also support OAuth.
"""
# Ensure blocks are loaded
try:
from backend.blocks import load_all_blocks
load_all_blocks()
except Exception as e:
logger.warning(f"Failed to load blocks: {e}")
from backend.sdk.registry import AutoRegistry
providers = []
for name in get_all_provider_names():
supports_oauth = name in HANDLERS_BY_NAME
handler_class = HANDLERS_BY_NAME.get(name)
default_scopes = (
getattr(handler_class, "DEFAULT_SCOPES", []) if handler_class else []
)
# Check if provider has specific auth types from SDK registration
sdk_provider = AutoRegistry.get_provider(name)
if sdk_provider and sdk_provider.supported_auth_types:
supports_api_key = "api_key" in sdk_provider.supported_auth_types
supports_user_password = (
"user_password" in sdk_provider.supported_auth_types
)
supports_host_scoped = "host_scoped" in sdk_provider.supported_auth_types
else:
# Fallback for legacy providers
supports_api_key = True # All providers can accept API keys
supports_user_password = name in ("smtp",)
supports_host_scoped = name == "http"
providers.append(
ProviderInfo(
name=name,
supports_oauth=supports_oauth,
supports_api_key=supports_api_key,
supports_user_password=supports_user_password,
supports_host_scoped=supports_host_scoped,
default_scopes=default_scopes,
)
)
return providers
@integrations_router.post(
"/{provider}/oauth/initiate",
response_model=OAuthInitiateResponse,
summary="Initiate OAuth flow",
)
async def initiate_oauth(
provider: Annotated[str, Path(title="The OAuth provider")],
request: OAuthInitiateRequest,
api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
),
) -> OAuthInitiateResponse:
"""
Initiate an OAuth flow for an external application.
This endpoint allows external apps to start an OAuth flow with a custom
callback URL. The callback URL must be from an allowed origin configured
in the platform settings.
Returns a login URL to redirect the user to, along with a state token
for CSRF protection.
"""
# Validate callback URL
if not validate_callback_url(request.callback_url):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Callback URL origin is not allowed. Allowed origins: {settings.config.external_oauth_callback_origins}",
)
# Validate provider
try:
provider_name = ProviderName(provider)
except ValueError:
# Check if it's a dynamically registered provider
if provider not in HANDLERS_BY_NAME:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Provider '{provider}' not found",
)
provider_name = provider
# Get OAuth handler with external callback URL
handler = _get_oauth_handler_for_external(
provider if isinstance(provider_name, str) else provider_name.value,
request.callback_url,
)
# Store state token with external flow metadata
state_token, code_challenge = await creds_manager.store.store_state_token(
user_id=api_key.user_id,
provider=provider if isinstance(provider_name, str) else provider_name.value,
scopes=request.scopes,
callback_url=request.callback_url,
state_metadata=request.state_metadata,
initiated_by_api_key_id=api_key.id,
)
# Build login URL
login_url = handler.get_login_url(
request.scopes, state_token, code_challenge=code_challenge
)
# Calculate expiration (10 minutes from now)
from datetime import datetime, timedelta, timezone
expires_at = int((datetime.now(timezone.utc) + timedelta(minutes=10)).timestamp())
return OAuthInitiateResponse(
login_url=login_url,
state_token=state_token,
expires_at=expires_at,
)
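A hypothetical client-side call to this endpoint; the base URL, header name, and provider are illustrative, and the actual auth scheme depends on the external API middleware:

```
import httpx

resp = httpx.post(
    "https://platform.example.com/external-api/v1/integrations/github/oauth/initiate",
    headers={"X-API-Key": "agpt_..."},
    json={
        "callback_url": "https://app.example.com/oauth/done",
        "scopes": ["repo"],
        "state_metadata": {"account_id": "42"},
    },
)
data = resp.json()
# Redirect the user to data["login_url"]; keep data["state_token"] for the
# completion step, and note data["expires_at"] (the state is short-lived).
```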
@integrations_router.post(
"/{provider}/oauth/complete",
response_model=OAuthCompleteResponse,
summary="Complete OAuth flow",
)
async def complete_oauth(
provider: Annotated[str, Path(title="The OAuth provider")],
request: OAuthCompleteRequest,
api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
),
) -> OAuthCompleteResponse:
"""
Complete an OAuth flow by exchanging the authorization code for tokens.
This endpoint should be called after the user has authorized the application
and been redirected back to the external app's callback URL with an
authorization code.
"""
# Verify state token
valid_state = await creds_manager.store.verify_state_token(
api_key.user_id, request.state_token, provider
)
if not valid_state:
logger.warning(f"Invalid or expired state token for provider {provider}")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid or expired state token",
)
# Verify this is an external flow (callback_url must be set)
if not valid_state.callback_url:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="State token was not created for external OAuth flow",
)
# Get OAuth handler with the original callback URL
handler = _get_oauth_handler_for_external(provider, valid_state.callback_url)
try:
scopes = valid_state.scopes
scopes = handler.handle_default_scopes(scopes)
credentials = await handler.exchange_code_for_tokens(
request.code, scopes, valid_state.code_verifier
)
# Handle Linear's space-separated scopes
if len(credentials.scopes) == 1 and " " in credentials.scopes[0]:
credentials.scopes = credentials.scopes[0].split(" ")
# Check scope mismatch
if not set(scopes).issubset(set(credentials.scopes)):
logger.warning(
f"Granted scopes {credentials.scopes} for provider {provider} "
f"do not include all requested scopes {scopes}"
)
except Exception as e:
logger.error(f"OAuth2 Code->Token exchange failed for provider {provider}: {e}")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"OAuth2 callback failed to exchange code for tokens: {str(e)}",
)
# Store credentials
await creds_manager.create(api_key.user_id, credentials)
logger.info(f"Successfully completed external OAuth for provider {provider}")
return OAuthCompleteResponse(
credentials_id=credentials.id,
provider=credentials.provider,
type=credentials.type,
title=credentials.title,
scopes=credentials.scopes,
username=credentials.username,
state_metadata=valid_state.state_metadata,
)
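And the matching completion step, again with illustrative URL and auth header, once the user returns to the callback URL with an authorization code:

```
import httpx

resp = httpx.post(
    "https://platform.example.com/external-api/v1/integrations/github/oauth/complete",
    headers={"X-API-Key": "agpt_..."},
    json={"code": "<authorization-code>", "state_token": "<state-from-initiate>"},
)
creds = resp.json()
# Echoes back the metadata supplied at initiation.
print(creds["credentials_id"], creds["scopes"], creds["state_metadata"])
```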
@integrations_router.get("/credentials", response_model=list[CredentialSummary])
async def list_credentials(
api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS)
),
) -> list[CredentialSummary]:
"""
List all credentials for the authenticated user.
Returns metadata about each credential without exposing sensitive tokens.
"""
credentials = await creds_manager.store.get_all_creds(api_key.user_id)
return [
CredentialSummary(
id=cred.id,
provider=cred.provider,
type=cred.type,
title=cred.title,
scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
username=cred.username if isinstance(cred, OAuth2Credentials) else None,
host=cred.host if isinstance(cred, HostScopedCredentials) else None,
)
for cred in credentials
]
@integrations_router.get(
"/{provider}/credentials", response_model=list[CredentialSummary]
)
async def list_credentials_by_provider(
provider: Annotated[str, Path(title="The provider to list credentials for")],
api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS)
),
) -> list[CredentialSummary]:
"""
List credentials for a specific provider.
"""
credentials = await creds_manager.store.get_creds_by_provider(
api_key.user_id, provider
)
return [
CredentialSummary(
id=cred.id,
provider=cred.provider,
type=cred.type,
title=cred.title,
scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
username=cred.username if isinstance(cred, OAuth2Credentials) else None,
host=cred.host if isinstance(cred, HostScopedCredentials) else None,
)
for cred in credentials
]
@integrations_router.post(
"/{provider}/credentials",
response_model=CreateCredentialResponse,
status_code=status.HTTP_201_CREATED,
summary="Create credentials",
)
async def create_credential(
provider: Annotated[str, Path(title="The provider to create credentials for")],
request: Union[
CreateAPIKeyCredentialRequest,
CreateUserPasswordCredentialRequest,
CreateHostScopedCredentialRequest,
] = Body(..., discriminator="type"),
api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
),
) -> CreateCredentialResponse:
"""
Create non-OAuth credentials for a provider.
Supports creating:
- API key credentials (type: "api_key")
- Username/password credentials (type: "user_password")
- Host-scoped credentials (type: "host_scoped")
For OAuth credentials, use the OAuth initiate/complete flow instead.
"""
# Validate provider exists
all_providers = get_all_provider_names()
if provider not in all_providers:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Provider '{provider}' not found",
)
# Create the appropriate credential type
credentials: Credentials
if request.type == "api_key":
credentials = APIKeyCredentials(
provider=provider,
api_key=SecretStr(request.api_key),
title=request.title,
expires_at=request.expires_at,
)
elif request.type == "user_password":
credentials = UserPasswordCredentials(
provider=provider,
username=SecretStr(request.username),
password=SecretStr(request.password),
title=request.title,
)
elif request.type == "host_scoped":
# Convert string headers to SecretStr
secret_headers = {k: SecretStr(v) for k, v in request.headers.items()}
credentials = HostScopedCredentials(
provider=provider,
host=request.host,
headers=secret_headers,
title=request.title,
)
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Unsupported credential type: {request.type}",
)
# Store credentials
try:
await creds_manager.create(api_key.user_id, credentials)
except Exception as e:
logger.error(f"Failed to store credentials: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Failed to store credentials: {str(e)}",
)
logger.info(f"Created {request.type} credentials for provider {provider}")
return CreateCredentialResponse(
id=credentials.id,
provider=provider,
type=credentials.type,
title=credentials.title,
)
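Since `type` is the discriminator, each request body shape is selected by that field; illustrative payloads for the three supported types (values hypothetical):

```
# Illustrative request bodies for POST /v1/integrations/{provider}/credentials;
# the "type" field selects which model the discriminated union validates.
api_key_body = {"type": "api_key", "api_key": "sk-...", "title": "My key"}
user_password_body = {
    "type": "user_password",
    "username": "mailer",
    "password": "hunter2",
    "title": "SMTP login",
}
host_scoped_body = {
    "type": "host_scoped",
    "host": "api.internal.example.com",
    "headers": {"Authorization": "Bearer ..."},
    "title": "Internal API",
}
```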
class DeleteCredentialResponse(BaseModel):
"""Response model for deleting a credential."""
deleted: bool = Field(..., description="Whether the credential was deleted")
credentials_id: str = Field(..., description="ID of the deleted credential")
@integrations_router.delete(
"/{provider}/credentials/{cred_id}",
response_model=DeleteCredentialResponse,
)
async def delete_credential(
provider: Annotated[str, Path(title="The provider")],
cred_id: Annotated[str, Path(title="The credential ID to delete")],
api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.DELETE_INTEGRATIONS)
),
) -> DeleteCredentialResponse:
"""
Delete a credential.
Note: This does not revoke the tokens with the provider. For full cleanup,
use the main API's delete endpoint which handles webhook cleanup and
token revocation.
"""
creds = await creds_manager.store.get_creds_by_id(api_key.user_id, cred_id)
if not creds:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail="Credentials not found"
)
if creds.provider != provider:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Credentials do not match the specified provider",
)
await creds_manager.delete(api_key.user_id, cred_id)
return DeleteCredentialResponse(deleted=True, credentials_id=cred_id)

View File

@@ -1,148 +0,0 @@
"""External API routes for chat tools - stateless HTTP endpoints.
Note: These endpoints use ephemeral sessions that are not persisted to Redis.
As a result, session-based rate limiting (max_agent_runs, max_agent_schedules)
is not enforced for external API calls. Each request creates a fresh session
with zeroed counters. Rate limiting for external API consumers should be
handled separately (e.g., via API key quotas).
"""
import logging
from typing import Any
from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.data.api_key import APIKeyInfo
from backend.server.external.middleware import require_permission
from backend.server.v2.chat.model import ChatSession
from backend.server.v2.chat.tools import find_agent_tool, run_agent_tool
from backend.server.v2.chat.tools.models import ToolResponseBase
logger = logging.getLogger(__name__)
tools_router = APIRouter(prefix="/tools", tags=["tools"])
# Note: We use Security() as a function parameter dependency (api_key: APIKeyInfo = Security(...))
# rather than in the decorator's dependencies= list. This avoids duplicate permission checks
# while still enforcing auth AND giving us access to the api_key for extracting user_id.
# Request models
class FindAgentRequest(BaseModel):
query: str = Field(..., description="Search query for finding agents")
class RunAgentRequest(BaseModel):
"""Request to run or schedule an agent.
The tool automatically handles the setup flow:
- First call returns available inputs so user can decide what values to use
- Returns missing credentials if user needs to configure them
- Executes when inputs are provided OR use_defaults=true
- Schedules execution if schedule_name and cron are provided
"""
username_agent_slug: str = Field(
...,
description="The marketplace agent slug (e.g., 'username/agent-name')",
)
inputs: dict[str, Any] = Field(
default_factory=dict,
description="Dictionary of input values for the agent",
)
use_defaults: bool = Field(
default=False,
description="Set to true to run with default values (user must confirm)",
)
schedule_name: str | None = Field(
None,
description="Name for scheduled execution (triggers scheduling mode)",
)
cron: str | None = Field(
None,
description="Cron expression (5 fields: minute hour day month weekday)",
)
timezone: str = Field(
default="UTC",
description="IANA timezone (e.g., 'America/New_York', 'UTC')",
)
def _create_ephemeral_session(user_id: str | None) -> ChatSession:
"""Create an ephemeral session for stateless API requests."""
return ChatSession.new(user_id)
@tools_router.post(
path="/find-agent",
)
async def find_agent(
request: FindAgentRequest,
api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.USE_TOOLS)),
) -> dict[str, Any]:
"""
Search for agents in the marketplace based on capabilities and user needs.
Args:
request: Search query for finding agents
Returns:
List of matching agents or no results response
"""
session = _create_ephemeral_session(api_key.user_id)
result = await find_agent_tool._execute(
user_id=api_key.user_id,
session=session,
query=request.query,
)
return _response_to_dict(result)
@tools_router.post(
path="/run-agent",
)
async def run_agent(
request: RunAgentRequest,
api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.USE_TOOLS)),
) -> dict[str, Any]:
"""
Run or schedule an agent from the marketplace.
The endpoint automatically handles the setup flow:
- Returns missing inputs if required fields are not provided
- Returns missing credentials if user needs to configure them
- Executes immediately if all requirements are met
- Schedules execution if schedule_name and cron are provided
For scheduled execution:
- Cron format: "minute hour day month weekday"
- Examples: "0 9 * * 1-5" (9am weekdays), "0 0 * * *" (daily at midnight)
- Timezone: Use IANA timezone names like "America/New_York"
Args:
request: Agent slug, inputs, and optional schedule config
Returns:
- setup_requirements: If inputs or credentials are missing
- execution_started: If agent was run or scheduled successfully
- error: If something went wrong
"""
session = _create_ephemeral_session(api_key.user_id)
result = await run_agent_tool._execute(
user_id=api_key.user_id,
session=session,
username_agent_slug=request.username_agent_slug,
inputs=request.inputs,
use_defaults=request.use_defaults,
schedule_name=request.schedule_name or "",
cron=request.cron or "",
timezone=request.timezone,
)
return _response_to_dict(result)
def _response_to_dict(result: ToolResponseBase) -> dict[str, Any]:
"""Convert a tool response to a dictionary for JSON serialization."""
return result.model_dump()
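A hypothetical request against this endpoint; supplying `schedule_name` plus a 5-field `cron` switches it into scheduling mode (URL and auth header are illustrative):

```
import httpx

resp = httpx.post(
    "https://platform.example.com/external-api/v1/tools/run-agent",
    headers={"X-API-Key": "agpt_..."},
    json={
        "username_agent_slug": "autogpt/ai-news",
        "inputs": {"topic": "AI news"},
        "schedule_name": "Morning digest",
        "cron": "0 9 * * 1-5",  # 9am on weekdays
        "timezone": "America/New_York",
    },
)
print(resp.json())  # setup_requirements / execution_started / error
```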

View File

@@ -1,15 +1,12 @@
import logging
import urllib.parse
from collections import defaultdict
from typing import Annotated, Any, Literal, Optional, Sequence
from typing import Annotated, Any, Optional, Sequence
from fastapi import APIRouter, Body, HTTPException, Security
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
import backend.server.v2.store.cache as store_cache
import backend.server.v2.store.model as store_model
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKeyInfo
@@ -147,149 +144,3 @@ async def get_graph_execution_results(
else None
),
)
##############################################
############### Store Endpoints ##############
##############################################
@v1_router.get(
path="/store/agents",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.StoreAgentsResponse,
)
async def get_store_agents(
featured: bool = False,
creator: str | None = None,
sorted_by: Literal["rating", "runs", "name", "updated_at"] | None = None,
search_query: str | None = None,
category: str | None = None,
page: int = 1,
page_size: int = 20,
) -> store_model.StoreAgentsResponse:
"""
Get a paginated list of agents from the store with optional filtering and sorting.
Args:
featured: Filter to only show featured agents
creator: Filter agents by creator username
sorted_by: Sort agents by "runs", "rating", "name", or "updated_at"
search_query: Search agents by name, subheading and description
category: Filter agents by category
page: Page number for pagination (default 1)
page_size: Number of agents per page (default 20)
Returns:
StoreAgentsResponse: Paginated list of agents matching the filters
"""
if page < 1:
raise HTTPException(status_code=422, detail="Page must be greater than 0")
if page_size < 1:
raise HTTPException(status_code=422, detail="Page size must be greater than 0")
agents = await store_cache._get_cached_store_agents(
featured=featured,
creator=creator,
sorted_by=sorted_by,
search_query=search_query,
category=category,
page=page,
page_size=page_size,
)
return agents
@v1_router.get(
path="/store/agents/{username}/{agent_name}",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.StoreAgentDetails,
)
async def get_store_agent(
username: str,
agent_name: str,
) -> store_model.StoreAgentDetails:
"""
Get details of a specific store agent by username and agent name.
Args:
username: Creator's username
agent_name: Name/slug of the agent
Returns:
StoreAgentDetails: Detailed information about the agent
"""
username = urllib.parse.unquote(username).lower()
agent_name = urllib.parse.unquote(agent_name).lower()
agent = await store_cache._get_cached_agent_details(
username=username, agent_name=agent_name
)
return agent
@v1_router.get(
path="/store/creators",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.CreatorsResponse,
)
async def get_store_creators(
featured: bool = False,
search_query: str | None = None,
sorted_by: Literal["agent_rating", "agent_runs", "num_agents"] | None = None,
page: int = 1,
page_size: int = 20,
) -> store_model.CreatorsResponse:
"""
Get a paginated list of store creators with optional filtering and sorting.
Args:
featured: Filter to only show featured creators
search_query: Search creators by profile description
sorted_by: Sort by "agent_rating", "agent_runs", or "num_agents"
page: Page number for pagination (default 1)
page_size: Number of creators per page (default 20)
Returns:
CreatorsResponse: Paginated list of creators matching the filters
"""
if page < 1:
raise HTTPException(status_code=422, detail="Page must be greater than 0")
if page_size < 1:
raise HTTPException(status_code=422, detail="Page size must be greater than 0")
creators = await store_cache._get_cached_store_creators(
featured=featured,
search_query=search_query,
sorted_by=sorted_by,
page=page,
page_size=page_size,
)
return creators
@v1_router.get(
path="/store/creators/{username}",
tags=["store"],
dependencies=[Security(require_permission(APIKeyPermission.READ_STORE))],
response_model=store_model.CreatorDetails,
)
async def get_store_creator(
username: str,
) -> store_model.CreatorDetails:
"""
Get details of a specific store creator by username.
Args:
username: Creator's username
Returns:
CreatorDetails: Detailed information about the creator
"""
username = urllib.parse.unquote(username).lower()
creator = await store_cache._get_cached_creator_details(username=username)
return creator
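For reference, illustrative client calls against these (now removed) store endpoints, with hypothetical base URL and auth header:

```
import httpx

headers = {"X-API-Key": "agpt_..."}
base = "https://platform.example.com/external-api/v1"

# Paginated, sorted agent listing
agents = httpx.get(
    f"{base}/store/agents",
    params={"sorted_by": "rating", "page": 1, "page_size": 20},
    headers=headers,
).json()

# Creator profile by username
creator = httpx.get(f"{base}/store/creators/autogpt", headers=headers).json()
```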

View File

@@ -29,7 +29,6 @@ import backend.server.v2.admin.store_admin_routes
import backend.server.v2.builder
import backend.server.v2.builder.routes
import backend.server.v2.chat.routes as chat_routes
import backend.server.v2.executions.review.routes
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
@@ -275,11 +274,6 @@ app.include_router(
tags=["v2", "admin"],
prefix="/api/executions",
)
app.include_router(
backend.server.v2.executions.review.routes.router,
tags=["v2", "executions", "review"],
prefix="/api/review",
)
app.include_router(
backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
)

View File

@@ -44,7 +44,7 @@ from backend.data.credit import (
get_user_credit_model,
set_auto_top_up,
)
from backend.data.graph import GraphSettings
from backend.data.execution import UserContext
from backend.data.model import CredentialsMetaInput
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
from backend.data.onboarding import (
@@ -143,28 +143,6 @@ async def hide_activity_summary_if_disabled(
return execution
async def _update_library_agent_version_and_settings(
user_id: str, agent_graph: graph_db.GraphModel
) -> library_db.library_model.LibraryAgent:
# Keep the library agent up to date with the new active version
library = await library_db.update_agent_version_in_library(
user_id, agent_graph.id, agent_graph.version
)
# If the graph has HITL node, initialize the setting if it's not already set.
if (
agent_graph.has_human_in_the_loop
and library.settings.human_in_the_loop_safe_mode is None
):
await library_db.update_library_agent_settings(
user_id=user_id,
agent_id=library.id,
settings=library.settings.model_copy(
update={"human_in_the_loop_safe_mode": True}
),
)
return library
# Define the API routes
v1_router = APIRouter()
@@ -409,15 +387,19 @@ async def execute_graph_block(
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
# Get user context for block execution
user = await get_user_by_id(user_id)
if not user:
raise HTTPException(status_code=404, detail="User not found.")
user_context = UserContext(timezone=user.timezone)
start_time = time.time()
try:
output = defaultdict(list)
async for name, data in obj.execute(
data,
user_context=user_context,
user_id=user_id,
# Note: graph_exec_id and graph_id are not available for direct block execution
):
@@ -860,7 +842,9 @@ async def update_graph(
if new_graph_version.is_active:
# Keep the library agent up to date with the new active version
await _update_library_agent_version_and_settings(user_id, new_graph_version)
await library_db.update_agent_version_in_library(
user_id, graph.id, graph.version
)
# Handle activation of the new graph first to ensure continuity
new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id)
@@ -917,43 +901,15 @@ async def set_graph_active_version(
)
# Keep the library agent up to date with the new active version
await _update_library_agent_version_and_settings(user_id, new_active_graph)
await library_db.update_agent_version_in_library(
user_id, new_active_graph.id, new_active_graph.version
)
if current_active_graph and current_active_graph.version != new_active_version:
# Handle deactivation of the previously active version
await on_graph_deactivate(current_active_graph, user_id=user_id)
@v1_router.patch(
path="/graphs/{graph_id}/settings",
summary="Update graph settings",
tags=["graphs"],
dependencies=[Security(requires_user)],
)
async def update_graph_settings(
graph_id: str,
settings: GraphSettings,
user_id: Annotated[str, Security(get_user_id)],
) -> GraphSettings:
"""Update graph settings for the user's library agent."""
# Get the library agent for this graph
library_agent = await library_db.get_library_agent_by_graph_id(
graph_id=graph_id, user_id=user_id
)
if not library_agent:
raise HTTPException(404, f"Graph #{graph_id} not found in user's library")
# Update the library agent settings
updated_agent = await library_db.update_library_agent_settings(
user_id=user_id,
agent_id=library_agent.id,
settings=settings,
)
# Return the updated settings
return GraphSettings.model_validate(updated_agent.settings)
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
summary="Execute graph agent",

View File

@@ -7,7 +7,6 @@ import backend.data.block
from backend.blocks import load_all_blocks
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.server.v2.builder.model import (
BlockCategoryResponse,
@@ -341,13 +340,13 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
# Calculate the cutoff timestamp
timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)
results = await query_raw_with_schema(
results = await prisma.get_client().query_raw(
"""
SELECT
agent_node."agentBlockId" AS block_id,
COUNT(execution.id) AS execution_count
FROM {schema_prefix}"AgentNodeExecution" execution
JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
FROM "AgentNodeExecution" execution
JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= $1::timestamp
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;
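For context, a rough sketch of what the removed schema-aware helper `query_raw_with_schema` plausibly does before this hunk swaps it for a plain `query_raw` (the prefix handling is an assumption; only the `{schema_prefix}` placeholder is visible in the diff):

```
from prisma import get_client

async def query_raw_with_schema_sketch(query: str, *args, schema: str | None = None):
    # Substitute the {schema_prefix} placeholder with '"schema".' (or nothing
    # for the default schema) before handing the SQL to query_raw.
    prefix = f'"{schema}".' if schema else ""
    return await get_client().query_raw(query.format(schema_prefix=prefix), *args)
```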

View File

@@ -4,29 +4,21 @@ Here are the functions available to you:
<functions>
1. **find_agent** - Search for agents that solve the user's problem
2. **run_agent** - Run or schedule an agent (automatically handles setup)
2. **get_agent_details** - Get comprehensive information about the chosen agent
3. **get_required_setup_info** - Verify user has required credentials (MANDATORY before execution)
4. **schedule_agent** - Schedules the agent to run based on a cron
5. **run_agent** - Execute the agent
</functions>
## HOW run_agent WORKS
The `run_agent` tool automatically handles the entire setup flow:
## MANDATORY WORKFLOW
1. **First call** (no inputs) → Returns available inputs so user can decide what values to use
2. **Credentials check** → If missing, UI automatically prompts user to add them (you don't need to mention this)
3. **Execution** → Runs when you provide `inputs` OR set `use_defaults=true`
Parameters:
- `username_agent_slug` (required): Agent identifier like "creator/agent-name"
- `inputs`: Object with input values for the agent
- `use_defaults`: Set to `true` to run with default values (only after user confirms)
- `schedule_name` + `cron`: For scheduled execution
## WORKFLOW
You must follow these 4 steps in exact order:
1. **find_agent** - Search for agents that solve the user's problem
2. **run_agent** (first call, no inputs) - Get available inputs for the agent
3. **Ask user** what values they want to use OR if they want to use defaults
4. **run_agent** (second call) - Either with `inputs={...}` or `use_defaults=true`
2. **get_agent_details** - Get comprehensive information about the chosen agent
3. **get_required_setup_info** - Verify user has required credentials (MANDATORY before execution)
4. **schedule_agent** or **run_agent** - Execute the agent
## YOUR APPROACH
@@ -39,66 +31,67 @@ Parameters:
- Use `find_agent` immediately with relevant keywords
- Suggest the best option from search results
- Explain briefly how it solves their problem
- Ask if they want to use it, then move to step 3
**Step 3: Get Agent Inputs**
- Call `run_agent(username_agent_slug="creator/agent-name")` without inputs
- This returns the available inputs (required and optional)
- Present these to the user and ask what values they want
**Step 3: Get Details**
- Use `get_agent_details` on their chosen agent
- Explain what the agent does and its requirements
- Keep explanations brief and outcome-focused
**Step 4: Run with User's Choice**
- If user provides values: `run_agent(username_agent_slug="...", inputs={...})`
- If user says "use defaults": `run_agent(username_agent_slug="...", use_defaults=true)`
- On success, share the agent link with the user
**Step 4: Verify Setup (CRITICAL)**
- ALWAYS use `get_required_setup_info` before execution
- Tell user what credentials they need (if any)
- Explain that credentials are added via the frontend interface
**For Scheduled Execution:**
- Add `schedule_name` and `cron` parameters
- Example: `run_agent(username_agent_slug="...", inputs={...}, schedule_name="Daily Report", cron="0 9 * * *")`
**Step 5: Execute**
- Use `schedule_agent` for scheduled runs OR `run_agent` for immediate execution
- Confirm successful setup
- Provide clear next steps
## FUNCTION CALL FORMAT
To call a function, use this exact format:
`<function_call>function_name(parameter="value")</function_call>`
Examples:
- `<function_call>find_agent(query="social media automation")</function_call>`
- `<function_call>run_agent(username_agent_slug="creator/agent-name")</function_call>` (get inputs)
- `<function_call>run_agent(username_agent_slug="creator/agent-name", inputs={"topic": "AI news"})</function_call>`
- `<function_call>run_agent(username_agent_slug="creator/agent-name", use_defaults=true)</function_call>`
## KEY RULES
**What You DON'T Do:**
- Don't help with login (frontend handles this)
- Don't mention or explain credentials to the user (frontend handles this automatically)
- Don't run agents without first showing available inputs to the user
- Don't use `use_defaults=true` without user explicitly confirming
- Don't help add credentials (frontend handles this)
- Don't skip `get_required_setup_info` (mandatory before execution)
- Don't ask permission to use functions - just use them
- Don't write responses longer than 3 sentences
- Don't pretend to be ChatGPT
**What You DO:**
- Always call run_agent first without inputs to see what's available
- Ask user what values they want OR if they want to use defaults
- Act fast - get to agent discovery quickly
- Use functions proactively
- Keep all responses to maximum 3 sentences
- Include the agent link in your response after successful execution
- Always verify credentials before setup/run
- Focus on outcomes and value
- Maintain conversational, concise style
- Do use markdown to make your messages easier to read
**Error Handling:**
- Authentication needed → "Please sign in via the interface"
- Credentials missing → The UI handles this automatically. Focus on asking the user about input values instead.
- Credentials missing → Tell user what's needed and where to add them
- Setup fails → Identify issue and provide clear fix
## RESPONSE STRUCTURE
Before responding, wrap your analysis in <thinking> tags to systematically plan your approach:
- Identify which step of the 4-step mandatory workflow you're currently on
- Extract the key business problem or request from the user's message
- Determine what function call (if any) you need to make next
- Plan your response to stay under the 3-sentence maximum
- Consider what specific keywords or parameters you'll use for any function calls
Example interaction:
Example interaction pattern:
```
User: "Run the AI news agent for me"
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news")</function_call>
[Tool returns: Agent accepts inputs - Required: topic. Optional: num_articles (default: 5)]
Otto: The AI News agent needs a topic. What topic would you like news about, or should I use the defaults?
User: "Use defaults"
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news", use_defaults=true)</function_call>
User: "I need to automate my social media posting"
Otto: Let me find social media automation agents for you. <function_call>find_agent(query="social media posting automation")</function_call> I'll show you the best options once I get the results.
```
KEEP ANSWERS TO 3 SENTENCES
Respond conversationally and begin helping them find the right AutoGPT agent for their needs.
KEEP ANSWERS TO 3 SENTENCES

View File

@@ -64,10 +64,7 @@ async def create_session(
CreateSessionResponse: Details of the created session.
"""
logger.info(
f"Creating session with user_id: "
f"...{user_id[-8:] if user_id and len(user_id) > 8 else '<redacted>'}"
)
logger.info(f"Creating session with user_id: {user_id}")
session = await chat_service.create_chat_session(user_id)

View File

@@ -6,18 +6,27 @@ from backend.server.v2.chat.model import ChatSession
from .base import BaseTool
from .find_agent import FindAgentTool
from .get_agent_details import GetAgentDetailsTool
from .get_required_setup_info import GetRequiredSetupInfoTool
from .run_agent import RunAgentTool
from .setup_agent import SetupAgentTool
if TYPE_CHECKING:
from backend.server.v2.chat.response_model import StreamToolExecutionResult
# Initialize tool instances
find_agent_tool = FindAgentTool()
get_agent_details_tool = GetAgentDetailsTool()
get_required_setup_info_tool = GetRequiredSetupInfoTool()
setup_agent_tool = SetupAgentTool()
run_agent_tool = RunAgentTool()
# Export tools as OpenAI format
tools: list[ChatCompletionToolParam] = [
find_agent_tool.as_openai_tool(),
get_agent_details_tool.as_openai_tool(),
get_required_setup_info_tool.as_openai_tool(),
setup_agent_tool.as_openai_tool(),
run_agent_tool.as_openai_tool(),
]
@@ -32,6 +41,9 @@ async def execute_tool(
tool_map: dict[str, BaseTool] = {
"find_agent": find_agent_tool,
"get_agent_details": get_agent_details_tool,
"get_required_setup_info": get_required_setup_info_tool,
"schedule_agent": setup_agent_tool,
"run_agent": run_agent_tool,
}
if tool_name not in tool_map:

View File

@@ -0,0 +1,221 @@
"""Tool for getting detailed information about a specific agent."""
import logging
from typing import Any
from backend.data import graph as graph_db
from backend.data.model import CredentialsMetaInput
from backend.server.v2.chat.model import ChatSession
from backend.server.v2.chat.tools.base import BaseTool
from backend.server.v2.chat.tools.models import (
AgentDetails,
AgentDetailsResponse,
ErrorResponse,
ExecutionOptions,
ToolResponseBase,
)
from backend.server.v2.store import db as store_db
from backend.util.exceptions import DatabaseError, NotFoundError
logger = logging.getLogger(__name__)
class GetAgentDetailsTool(BaseTool):
"""Tool for getting detailed information about an agent."""
@property
def name(self) -> str:
return "get_agent_details"
@property
def description(self) -> str:
return "Get detailed information about a specific agent including inputs, credentials required, and execution options."
@property
def parameters(self) -> dict[str, Any]:
return {
"type": "object",
"properties": {
"username_agent_slug": {
"type": "string",
"description": "The marketplace agent slug (e.g., 'username/agent-name')",
},
},
"required": ["username_agent_slug"],
}
async def _execute(
self,
user_id: str | None,
session: ChatSession,
**kwargs,
) -> ToolResponseBase:
"""Get detailed information about an agent.
Args:
user_id: User ID (may be anonymous)
session: Chat session state
username_agent_slug: Marketplace agent slug (e.g., 'username/agent-name')
Returns:
Pydantic response model
"""
agent_id = kwargs.get("username_agent_slug", "").strip()
session_id = session.session_id
if not agent_id or "/" not in agent_id:
return ErrorResponse(
message="Please provide an agent ID in format 'creator/agent-name'",
session_id=session_id,
)
try:
# Always try to get from marketplace first
graph = None
store_agent = None
# Check if it's a slug format (username/agent_name)
try:
# Parse username/agent_name from slug
username, agent_name = agent_id.split("/", 1)
store_agent = await store_db.get_store_agent_details(
username, agent_name
)
logger.info(f"Found agent {agent_id} in marketplace")
except NotFoundError as e:
logger.debug(f"Failed to get from marketplace: {e}")
return ErrorResponse(
message=f"Agent '{agent_id}' not found",
session_id=session_id,
)
except DatabaseError as e:
logger.error(f"Failed to get from marketplace: {e}")
return ErrorResponse(
message=f"Failed to get agent details: {e!s}",
session_id=session_id,
)
# If we found a store agent, get its graph
if store_agent:
try:
# Use get_available_graph to get the graph from store listing version
graph_meta = await store_db.get_available_graph(
store_agent.store_listing_version_id
)
# Now get the full graph with that ID
graph = await graph_db.get_graph(
graph_id=graph_meta.id,
version=graph_meta.version,
user_id=None, # Public access
include_subgraphs=True,
)
except NotFoundError as e:
logger.error(f"Failed to get graph for store agent: {e}")
return ErrorResponse(
message=f"Failed to get graph for store agent: {e!s}",
session_id=session_id,
)
except DatabaseError as e:
logger.error(f"Failed to get graph for store agent: {e}")
return ErrorResponse(
message=f"Failed to get graph for store agent: {e!s}",
session_id=session_id,
)
if not graph:
return ErrorResponse(
message=f"Agent '{agent_id}' not found",
session_id=session_id,
)
credentials_input_schema = graph.credentials_input_schema
# Extract credentials from the JSON schema properties
credentials = []
if (
isinstance(credentials_input_schema, dict)
and "properties" in credentials_input_schema
):
for cred_name, cred_schema in credentials_input_schema[
"properties"
].items():
# Extract credential metadata from the schema
# The schema properties contain provider info and other metadata
# Get provider from credentials_provider array or properties.provider.const
provider = "unknown"
if (
"credentials_provider" in cred_schema
and cred_schema["credentials_provider"]
):
provider = cred_schema["credentials_provider"][0]
elif (
"properties" in cred_schema
and "provider" in cred_schema["properties"]
):
provider = cred_schema["properties"]["provider"].get(
"const", "unknown"
)
# Get type from credentials_types array or properties.type.const
cred_type = "api_key" # Default
if (
"credentials_types" in cred_schema
and cred_schema["credentials_types"]
):
cred_type = cred_schema["credentials_types"][0]
elif (
"properties" in cred_schema
and "type" in cred_schema["properties"]
):
cred_type = cred_schema["properties"]["type"].get(
"const", "api_key"
)
credentials.append(
CredentialsMetaInput(
id=cred_name,
title=cred_schema.get("title", cred_name),
provider=provider, # type: ignore
type=cred_type,
)
)
trigger_info = (
graph.trigger_setup_info.model_dump()
if graph.trigger_setup_info
else None
)
agent_details = AgentDetails(
id=graph.id,
name=graph.name,
description=graph.description,
inputs=graph.input_schema,
credentials=credentials,
execution_options=ExecutionOptions(
# Currently a graph with a webhook can only be triggered by a webhook
manual=trigger_info is None,
scheduled=trigger_info is None,
webhook=trigger_info is not None,
),
trigger_info=trigger_info,
)
return AgentDetailsResponse(
message=f"Found agent '{agent_details.name}'. When presenting the agent you do not need to mention the required credentials. You do not need to run this tool again for this agent.",
session_id=session_id,
agent=agent_details,
user_authenticated=user_id is not None,
graph_id=graph.id,
graph_version=graph.version,
)
except Exception as e:
logger.error(f"Error getting agent details: {e}", exc_info=True)
return ErrorResponse(
message=f"Failed to get agent details: {e!s}",
error=str(e),
session_id=session_id,
)
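To make the two extraction paths above concrete, here are illustrative schema fragments matching each branch (field names inferred from the code; real schemas come from `graph.credentials_input_schema`):

```
# Flat style: provider/type read from the credentials_* arrays.
flat_style = {
    "title": "OpenAI API key",
    "credentials_provider": ["openai"],   # -> provider = "openai"
    "credentials_types": ["api_key"],     # -> cred_type = "api_key"
}

# Nested style: provider/type read from const values under "properties".
nested_style = {
    "title": "GitHub account",
    "properties": {
        "provider": {"const": "github"},  # -> provider = "github"
        "type": {"const": "oauth2"},      # -> cred_type = "oauth2"
    },
}
```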

View File

@@ -0,0 +1,335 @@
import uuid
import orjson
import pytest
from backend.server.v2.chat.tools._test_data import (
make_session,
setup_llm_test_data,
setup_test_data,
)
from backend.server.v2.chat.tools.get_agent_details import GetAgentDetailsTool
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_success(setup_test_data):
"""Test successfully getting agent details from marketplace"""
# Use test data from fixture
user = setup_test_data["user"]
graph = setup_test_data["graph"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = GetAgentDetailsTool()
# Build the proper marketplace agent_id format: username/slug
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Build session
session = make_session()
# Execute the tool
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
)
# Verify the response
assert response is not None
assert hasattr(response, "result")
# Parse the result JSON
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Check the basic structure
assert "agent" in result_data
assert "message" in result_data
assert "graph_id" in result_data
assert "graph_version" in result_data
assert "user_authenticated" in result_data
# Check agent details
agent = result_data["agent"]
assert agent["id"] == graph.id
assert agent["name"] == "Test Agent"
assert (
agent["description"] == "A simple test agent"
) # Description from store submission
assert "inputs" in agent
assert "credentials" in agent
assert "execution_options" in agent
# Check execution options
exec_options = agent["execution_options"]
assert "manual" in exec_options
assert "scheduled" in exec_options
assert "webhook" in exec_options
# Check inputs schema
assert isinstance(agent["inputs"], dict)
# Should have properties for the input fields
if "properties" in agent["inputs"]:
assert "test_input" in agent["inputs"]["properties"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_with_llm_credentials(setup_llm_test_data):
"""Test getting agent details for an agent that requires LLM credentials"""
# Use test data from fixture
user = setup_llm_test_data["user"]
store_submission = setup_llm_test_data["store_submission"]
# Create the tool instance
tool = GetAgentDetailsTool()
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
# Execute the tool
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
)
# Verify the response
assert response is not None
assert hasattr(response, "result")
# Parse the result JSON
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Check that agent details are returned
assert "agent" in result_data
agent = result_data["agent"]
# Check that credentials are listed
assert "credentials" in agent
credentials = agent["credentials"]
# The LLM agent should have OpenAI credentials listed
assert isinstance(credentials, list)
# Check that inputs include the user_prompt
assert "inputs" in agent
if "properties" in agent["inputs"]:
assert "user_prompt" in agent["inputs"]["properties"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_invalid_format():
"""Test error handling when agent_id is not in correct format"""
tool = GetAgentDetailsTool()
session = make_session()
session.user_id = str(uuid.uuid4())
# Execute with invalid format (no slash)
response = await tool.execute(
user_id=session.user_id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug="invalid-format",
)
# Verify error response
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "message" in result_data
assert "creator/agent-name" in result_data["message"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_empty_slug():
"""Test error handling when agent_id is empty"""
tool = GetAgentDetailsTool()
session = make_session()
session.user_id = str(uuid.uuid4())
# Execute with empty slug
response = await tool.execute(
user_id=session.user_id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug="",
)
# Verify error response
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "message" in result_data
assert "creator/agent-name" in result_data["message"]
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_not_found():
"""Test error handling when agent is not found in marketplace"""
tool = GetAgentDetailsTool()
session = make_session()
session.user_id = str(uuid.uuid4())
# Execute with non-existent agent
response = await tool.execute(
user_id=session.user_id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug="nonexistent/agent",
)
# Verify error response
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "message" in result_data
assert "not found" in result_data["message"].lower()
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_anonymous_user(setup_test_data):
"""Test getting agent details as an anonymous user (no user_id)"""
# Use test data from fixture
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = GetAgentDetailsTool()
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session()
# session.user_id stays as None
# Execute the tool without a user_id (anonymous)
response = await tool.execute(
user_id=None,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
)
# Verify the response
assert response is not None
assert hasattr(response, "result")
# Parse the result JSON
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should still get agent details
assert "agent" in result_data
assert "user_authenticated" in result_data
# User should be marked as not authenticated
assert result_data["user_authenticated"] is False
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_authenticated_user(setup_test_data):
"""Test getting agent details as an authenticated user"""
# Use test data from fixture
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = GetAgentDetailsTool()
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session()
session.user_id = user.id
# Execute the tool with a user_id (authenticated)
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
)
# Verify the response
assert response is not None
assert hasattr(response, "result")
# Parse the result JSON
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should get agent details
assert "agent" in result_data
assert "user_authenticated" in result_data
# User should be marked as authenticated
assert result_data["user_authenticated"] is True
@pytest.mark.asyncio(scope="session")
async def test_get_agent_details_includes_execution_options(setup_test_data):
"""Test that agent details include execution options"""
# Use test data from fixture
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = GetAgentDetailsTool()
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session()
session.user_id = user.id
# Execute the tool
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
)
# Verify the response
assert response is not None
assert hasattr(response, "result")
# Parse the result JSON
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Check execution options
assert "agent" in result_data
agent = result_data["agent"]
assert "execution_options" in agent
exec_options = agent["execution_options"]
# These should all be boolean values
assert isinstance(exec_options["manual"], bool)
assert isinstance(exec_options["scheduled"], bool)
assert isinstance(exec_options["webhook"], bool)
# For a regular agent (no webhook), manual and scheduled should be True
assert exec_options["manual"] is True
assert exec_options["scheduled"] is True
assert exec_options["webhook"] is False

View File

@@ -0,0 +1,182 @@
"""Tool for getting required setup information for an agent."""
import logging
from typing import Any
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.chat.model import ChatSession
from backend.server.v2.chat.tools.base import BaseTool
from backend.server.v2.chat.tools.get_agent_details import GetAgentDetailsTool
from backend.server.v2.chat.tools.models import (
AgentDetailsResponse,
ErrorResponse,
SetupInfo,
SetupRequirementsResponse,
ToolResponseBase,
UserReadiness,
)
logger = logging.getLogger(__name__)
class GetRequiredSetupInfoTool(BaseTool):
"""Tool for getting required setup information including credentials and inputs."""
@property
def name(self) -> str:
return "get_required_setup_info"
@property
def description(self) -> str:
return """Check if an agent can be set up with the provided input data and credentials.
Call this AFTER get_agent_details to validate that you have all required inputs.
Pass the input dictionary you plan to use with run_agent or setup_agent to verify it's complete."""
@property
def parameters(self) -> dict[str, Any]:
return {
"type": "object",
"properties": {
"username_agent_slug": {
"type": "string",
"description": "The marketplace agent slug (e.g., 'username/agent-name' or just 'agent-name' to search)",
},
"inputs": {
"type": "object",
"description": "The input dictionary you plan to provide. Should contain ALL required inputs from get_agent_details",
"additionalProperties": True,
},
},
"required": ["username_agent_slug"],
}
@property
def requires_auth(self) -> bool:
"""This tool requires authentication."""
return True
async def _execute(
self,
user_id: str | None,
session: ChatSession,
**kwargs,
) -> ToolResponseBase:
"""
Retrieve and validate the required setup information for running or configuring an agent.
This checks all required credentials and input fields based on the agent details,
and verifies user readiness to run the agent based on provided inputs and available credentials.
Args:
user_id: The authenticated user's ID (must not be None; authentication required).
session: The chat session.
**kwargs: Tool parameters, including:
username_agent_slug: The agent's marketplace slug (e.g. 'username/agent-name').
inputs: (Optional) The input dictionary the caller plans to provide.
Returns:
SetupRequirementsResponse containing:
- agent and graph info,
- credential and input requirements,
- user readiness and missing credentials/fields,
- setup instructions.
"""
assert (
user_id is not None
), "GetRequiredSetupInfoTool: user_id must not be None when requires_auth is True"
session_id = session.session_id
# Call _execute directly since we're calling internally from another tool
agent_details = await GetAgentDetailsTool()._execute(user_id, session, **kwargs)
if isinstance(agent_details, ErrorResponse):
return agent_details
if not isinstance(agent_details, AgentDetailsResponse):
return ErrorResponse(
message="Failed to get agent details",
session_id=session_id,
)
available_creds = await IntegrationCredentialsManager().store.get_all_creds(
user_id
)
required_credentials = []
# Check if user has credentials matching the required provider/type
for c in agent_details.agent.credentials:
# Check if any available credential matches this provider and type
has_matching_cred = any(
cred.provider == c.provider and cred.type == c.type
for cred in available_creds
)
if not has_matching_cred:
required_credentials.append(c)
required_fields = set(agent_details.agent.inputs.get("required", []))
provided_inputs = kwargs.get("inputs", {})
missing_inputs = required_fields - set(provided_inputs.keys())
missing_credentials = {c.id: c.model_dump() for c in required_credentials}
user_readiness = UserReadiness(
has_all_credentials=len(required_credentials) == 0,
missing_credentials=missing_credentials,
ready_to_run=len(missing_inputs) == 0 and len(required_credentials) == 0,
)
# Convert execution options to list of available modes
exec_opts = agent_details.agent.execution_options
execution_modes = []
if exec_opts.manual:
execution_modes.append("manual")
if exec_opts.scheduled:
execution_modes.append("scheduled")
if exec_opts.webhook:
execution_modes.append("webhook")
# Convert input schema to list of input field info
inputs_list = []
if (
isinstance(agent_details.agent.inputs, dict)
and "properties" in agent_details.agent.inputs
):
for field_name, field_schema in agent_details.agent.inputs[
"properties"
].items():
inputs_list.append(
{
"name": field_name,
"title": field_schema.get("title", field_name),
"type": field_schema.get("type", "string"),
"description": field_schema.get("description", ""),
"required": field_name
in agent_details.agent.inputs.get("required", []),
}
)
requirements = {
"credentials": agent_details.agent.credentials,
"inputs": inputs_list,
"execution_modes": execution_modes,
}
message = ""
if len(agent_details.agent.credentials) > 0:
message = "The user needs to enter credentials before proceeding. Please wait until you have a message informing you that the credentials have been entered."
elif len(inputs_list) > 0:
message = (
"The user needs to enter inputs before proceeding. Please wait until you have a message informing you that the inputs have been entered. The inputs are: "
+ ", ".join([input["name"] for input in inputs_list])
)
else:
message = "The agent is ready to run. Please call the run_agent tool with the agent ID."
return SetupRequirementsResponse(
message=message,
session_id=session_id,
setup_info=SetupInfo(
agent_id=agent_details.agent.id,
agent_name=agent_details.agent.name,
user_readiness=user_readiness,
requirements=requirements,
),
graph_id=agent_details.graph_id,
graph_version=agent_details.graph_version,
)
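
The readiness check above boils down to a set difference over required field names plus a credential match; a standalone sketch with illustrative names (not tied to any real agent):

# Required field names come from the agent's input schema; provided
# inputs come from the caller. Both names here are illustrative.
required_fields = {"search_query", "max_results"}
provided_inputs = {"search_query": "autogpt news"}

missing_inputs = required_fields - set(provided_inputs.keys())
required_credentials = []  # populated when no user credential matches provider/type
ready_to_run = not missing_inputs and not required_credentials
print(missing_inputs)  # {'max_results'} -> not ready to run yet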

View File

@@ -0,0 +1,331 @@
import uuid
import orjson
import pytest
from backend.server.v2.chat.tools._test_data import (
make_session,
setup_firecrawl_test_data,
setup_llm_test_data,
setup_test_data,
)
from backend.server.v2.chat.tools.get_required_setup_info import (
GetRequiredSetupInfoTool,
)
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
setup_firecrawl_test_data = setup_firecrawl_test_data
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_success(setup_test_data):
"""Test successfully getting setup info for a simple agent"""
user = setup_test_data["user"]
graph = setup_test_data["graph"]
store_submission = setup_test_data["store_submission"]
tool = GetRequiredSetupInfoTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={"test_input": "Hello World"},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "setup_info" in result_data
setup_info = result_data["setup_info"]
assert "agent_id" in setup_info
assert setup_info["agent_id"] == graph.id
assert "agent_name" in setup_info
assert setup_info["agent_name"] == "Test Agent"
assert "requirements" in setup_info
requirements = setup_info["requirements"]
assert "credentials" in requirements
assert "inputs" in requirements
assert "execution_modes" in requirements
assert isinstance(requirements["credentials"], list)
assert len(requirements["credentials"]) == 0
assert isinstance(requirements["inputs"], list)
if len(requirements["inputs"]) > 0:
first_input = requirements["inputs"][0]
assert "name" in first_input
assert "title" in first_input
assert "type" in first_input
assert isinstance(requirements["execution_modes"], list)
assert "manual" in requirements["execution_modes"]
assert "scheduled" in requirements["execution_modes"]
assert "user_readiness" in setup_info
user_readiness = setup_info["user_readiness"]
assert "has_all_credentials" in user_readiness
assert "ready_to_run" in user_readiness
assert user_readiness["ready_to_run"] is True
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_missing_credentials(setup_firecrawl_test_data):
"""Test getting setup info for an agent requiring missing credentials"""
user = setup_firecrawl_test_data["user"]
store_submission = setup_firecrawl_test_data["store_submission"]
tool = GetRequiredSetupInfoTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={"url": "https://example.com"},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "setup_info" in result_data
setup_info = result_data["setup_info"]
requirements = setup_info["requirements"]
assert "credentials" in requirements
assert isinstance(requirements["credentials"], list)
assert len(requirements["credentials"]) > 0
firecrawl_cred = requirements["credentials"][0]
assert "provider" in firecrawl_cred
assert firecrawl_cred["provider"] == "firecrawl"
assert "type" in firecrawl_cred
assert firecrawl_cred["type"] == "api_key"
user_readiness = setup_info["user_readiness"]
assert user_readiness["has_all_credentials"] is False
assert user_readiness["ready_to_run"] is False
assert "missing_credentials" in user_readiness
assert isinstance(user_readiness["missing_credentials"], dict)
assert len(user_readiness["missing_credentials"]) > 0
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_with_available_credentials(setup_llm_test_data):
"""Test getting setup info when user has required credentials"""
user = setup_llm_test_data["user"]
store_submission = setup_llm_test_data["store_submission"]
tool = GetRequiredSetupInfoTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={"user_prompt": "What is 2+2?"},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
setup_info = result_data["setup_info"]
user_readiness = setup_info["user_readiness"]
assert user_readiness["has_all_credentials"] is True
assert user_readiness["ready_to_run"] is True
assert "missing_credentials" in user_readiness
assert len(user_readiness["missing_credentials"]) == 0
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_missing_inputs(setup_test_data):
"""Test getting setup info when required inputs are not provided"""
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
tool = GetRequiredSetupInfoTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={}, # Empty inputs
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
setup_info = result_data["setup_info"]
requirements = setup_info["requirements"]
assert "inputs" in requirements
assert isinstance(requirements["inputs"], list)
user_readiness = setup_info["user_readiness"]
assert "ready_to_run" in user_readiness
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_invalid_agent():
"""Test getting setup info for a non-existent agent"""
tool = GetRequiredSetupInfoTool()
session = make_session(user_id=None)
response = await tool.execute(
user_id=str(uuid.uuid4()),
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug="invalid/agent",
inputs={},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "message" in result_data
assert any(
phrase in result_data["message"].lower()
for phrase in ["not found", "failed", "error"]
)
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_graph_metadata(setup_test_data):
"""Test that setup info includes graph metadata"""
user = setup_test_data["user"]
graph = setup_test_data["graph"]
store_submission = setup_test_data["store_submission"]
tool = GetRequiredSetupInfoTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={"test_input": "test"},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "graph_id" in result_data
assert result_data["graph_id"] == graph.id
assert "graph_version" in result_data
assert result_data["graph_version"] == graph.version
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_inputs_structure(setup_test_data):
"""Test that inputs are properly structured as a list"""
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
tool = GetRequiredSetupInfoTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
setup_info = result_data["setup_info"]
requirements = setup_info["requirements"]
assert isinstance(requirements["inputs"], list)
for input_field in requirements["inputs"]:
assert isinstance(input_field, dict)
assert "name" in input_field
assert "title" in input_field
assert "type" in input_field
assert "description" in input_field
assert "required" in input_field
assert isinstance(input_field["required"], bool)
@pytest.mark.asyncio(scope="session")
async def test_get_required_setup_info_execution_modes_structure(setup_test_data):
"""Test that execution_modes are properly structured as a list"""
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
tool = GetRequiredSetupInfoTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
setup_info = result_data["setup_info"]
requirements = setup_info["requirements"]
assert isinstance(requirements["execution_modes"], list)
for mode in requirements["execution_modes"]:
assert isinstance(mode, str)
assert mode in ["manual", "scheduled", "webhook"]

View File

@@ -13,9 +13,17 @@ class ResponseType(str, Enum):
AGENT_CAROUSEL = "agent_carousel"
AGENT_DETAILS = "agent_details"
AGENT_DETAILS_NEED_LOGIN = "agent_details_need_login"
AGENT_DETAILS_NEED_CREDENTIALS = "agent_details_need_credentials"
SETUP_REQUIREMENTS = "setup_requirements"
SCHEDULE_CREATED = "schedule_created"
WEBHOOK_CREATED = "webhook_created"
PRESET_CREATED = "preset_created"
EXECUTION_STARTED = "execution_started"
NEED_LOGIN = "need_login"
NEED_CREDENTIALS = "need_credentials"
INSUFFICIENT_CREDITS = "insufficient_credits"
VALIDATION_ERROR = "validation_error"
ERROR = "error"
NO_RESULTS = "no_results"
SUCCESS = "success"
@@ -104,7 +112,7 @@ class AgentDetails(BaseModel):
class AgentDetailsResponse(ToolResponseBase):
"""Response for get_details action."""
"""Response for get_agent_details tool."""
type: ResponseType = ResponseType.AGENT_DETAILS
agent: AgentDetails
@@ -113,7 +121,51 @@ class AgentDetailsResponse(ToolResponseBase):
graph_version: int | None = None
class AgentDetailsNeedLoginResponse(ToolResponseBase):
"""Response when agent details need login."""
type: ResponseType = ResponseType.AGENT_DETAILS_NEED_LOGIN
agent: AgentDetails
agent_info: dict[str, Any] | None = None
graph_id: str | None = None
graph_version: int | None = None
class AgentDetailsNeedCredentialsResponse(ToolResponseBase):
"""Response when agent needs credentials to be configured."""
type: ResponseType = ResponseType.NEED_CREDENTIALS
agent: AgentDetails
credentials_schema: dict[str, Any]
agent_info: dict[str, Any] | None = None
graph_id: str | None = None
graph_version: int | None = None
# Setup info models
class SetupRequirementInfo(BaseModel):
"""Setup requirement information."""
key: str
provider: str
required: bool = True
user_has: bool = False
credential_id: str | None = None
type: str | None = None
scopes: list[str] | None = None
description: str | None = None
class ExecutionModeInfo(BaseModel):
"""Execution mode information."""
type: str # manual, scheduled, webhook
description: str
supported: bool
config_required: dict[str, str] | None = None
trigger_info: dict[str, Any] | None = None
class UserReadiness(BaseModel):
"""User readiness status."""
@@ -135,10 +187,11 @@ class SetupInfo(BaseModel):
},
)
user_readiness: UserReadiness = Field(default_factory=UserReadiness)
setup_instructions: list[str] = []
class SetupRequirementsResponse(ToolResponseBase):
"""Response for validate action."""
"""Response for get_required_setup_info tool."""
type: ResponseType = ResponseType.SETUP_REQUIREMENTS
setup_info: SetupInfo
@@ -146,17 +199,70 @@ class SetupRequirementsResponse(ToolResponseBase):
graph_version: int | None = None
# Execution models
# Setup agent models
class ScheduleCreatedResponse(ToolResponseBase):
"""Response for scheduled agent setup."""
type: ResponseType = ResponseType.SCHEDULE_CREATED
schedule_id: str
name: str
cron: str
timezone: str = "UTC"
next_run: str | None = None
graph_id: str
graph_name: str
class WebhookCreatedResponse(ToolResponseBase):
"""Response for webhook agent setup."""
type: ResponseType = ResponseType.WEBHOOK_CREATED
webhook_id: str
webhook_url: str
preset_id: str | None = None
name: str
graph_id: str
graph_name: str
class PresetCreatedResponse(ToolResponseBase):
"""Response for preset agent setup."""
type: ResponseType = ResponseType.PRESET_CREATED
preset_id: str
name: str
graph_id: str
graph_name: str
# Run agent models
class ExecutionStartedResponse(ToolResponseBase):
"""Response for run/schedule actions."""
"""Response for agent execution started."""
type: ResponseType = ResponseType.EXECUTION_STARTED
execution_id: str
graph_id: str
graph_name: str
library_agent_id: str | None = None
library_agent_link: str | None = None
status: str = "QUEUED"
ended_at: str | None = None
outputs: dict[str, Any] | None = None
error: str | None = None
timeout_reached: bool | None = None
class InsufficientCreditsResponse(ToolResponseBase):
"""Response for insufficient credits."""
type: ResponseType = ResponseType.INSUFFICIENT_CREDITS
balance: float
class ValidationErrorResponse(ToolResponseBase):
"""Response for validation errors."""
type: ResponseType = ResponseType.VALIDATION_ERROR
error: str
details: dict[str, Any] | None = None
# Auth/error models
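
Since every tool response serializes its type discriminator, a client can dispatch on it after parsing the result. A minimal sketch using the enum values added above (the handler functions are hypothetical placeholders):

import orjson

payload = orjson.loads(response.result)  # response is any ToolResponseBase
match payload["type"]:
    case "setup_requirements":
        handle_setup(payload["setup_info"])  # hypothetical handler
    case "execution_started":
        show_execution(payload["execution_id"])  # hypothetical handler
    case "need_credentials" | "need_login":
        prompt_user(payload["message"])  # hypothetical handler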

View File

@@ -1,87 +1,34 @@
"""Unified tool for agent operations with automatic state detection."""
"""Tool for running an agent manually (one-off execution)."""
import logging
from typing import Any
from pydantic import BaseModel, Field, field_validator
from backend.data.graph import GraphModel
from backend.data.graph import get_graph
from backend.data.model import CredentialsMetaInput
from backend.data.user import get_user_by_id
from backend.executor import utils as execution_utils
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.chat.config import ChatConfig
from backend.server.v2.chat.model import ChatSession
from backend.server.v2.chat.tools.base import BaseTool
from backend.server.v2.chat.tools.get_required_setup_info import (
GetRequiredSetupInfoTool,
)
from backend.server.v2.chat.tools.models import (
AgentDetails,
AgentDetailsResponse,
ErrorResponse,
ExecutionOptions,
ExecutionStartedResponse,
SetupInfo,
SetupRequirementsResponse,
ToolResponseBase,
UserReadiness,
)
from backend.server.v2.chat.tools.utils import (
check_user_has_required_credentials,
extract_credentials_from_schema,
fetch_graph_from_store_slug,
get_or_create_library_agent,
match_user_credentials_to_graph,
)
from backend.util.clients import get_scheduler_client
from backend.util.exceptions import DatabaseError, NotFoundError
from backend.util.timezone_utils import (
convert_utc_time_to_user_timezone,
get_user_timezone_or_utc,
)
from backend.server.v2.library import db as library_db
from backend.server.v2.library import model as library_model
logger = logging.getLogger(__name__)
config = ChatConfig()
# Constants for response messages
MSG_DO_NOT_RUN_AGAIN = "Do not run again unless explicitly requested."
MSG_DO_NOT_SCHEDULE_AGAIN = "Do not schedule again unless explicitly requested."
MSG_ASK_USER_FOR_VALUES = (
"Ask the user what values to use, or call again with use_defaults=true "
"to run with default values."
)
MSG_WHAT_VALUES_TO_USE = (
"What values would you like to use, or would you like to run with defaults?"
)
class RunAgentInput(BaseModel):
"""Input parameters for the run_agent tool."""
username_agent_slug: str = ""
inputs: dict[str, Any] = Field(default_factory=dict)
use_defaults: bool = False
schedule_name: str = ""
cron: str = ""
timezone: str = "UTC"
@field_validator(
"username_agent_slug", "schedule_name", "cron", "timezone", mode="before"
)
@classmethod
def strip_strings(cls, v: Any) -> Any:
"""Strip whitespace from string fields."""
return v.strip() if isinstance(v, str) else v
class RunAgentTool(BaseTool):
"""Unified tool for agent operations with automatic state detection.
The tool automatically determines what to do based on provided parameters:
1. Fetches agent details (always, silently)
2. Checks if required inputs are provided
3. Checks if user has required credentials
4. Runs immediately OR schedules (if cron is provided)
The response tells the caller what's missing or confirms execution.
"""
"""Tool for executing an agent manually with immediate results."""
@property
def name(self) -> str:
@@ -89,15 +36,11 @@ class RunAgentTool(BaseTool):
@property
def description(self) -> str:
return """Run or schedule an agent from the marketplace.
The tool automatically handles the setup flow:
- Returns missing inputs if required fields are not provided
- Returns missing credentials if user needs to configure them
- Executes immediately if all requirements are met
- Schedules execution if cron expression is provided
For scheduled execution, provide: schedule_name, cron, and optionally timezone."""
return """Run an agent immediately (one-off manual execution).
IMPORTANT: Before calling this tool, you MUST first call get_agent_details to determine what inputs are required.
The 'inputs' parameter must be a dictionary containing ALL required input values identified by get_agent_details.
Example: If get_agent_details shows required inputs 'search_query' and 'max_results', you must pass:
inputs={"search_query": "user's query", "max_results": 10}"""
@property
def parameters(self) -> dict[str, Any]:
@@ -106,36 +49,20 @@ class RunAgentTool(BaseTool):
"properties": {
"username_agent_slug": {
"type": "string",
"description": "Agent identifier in format 'username/agent-name'",
"description": "The ID of the agent to run (graph ID or marketplace slug)",
},
"inputs": {
"type": "object",
"description": "Input values for the agent",
"description": 'REQUIRED: Dictionary of input values. Must include ALL required inputs from get_agent_details. Format: {"input_name": value}',
"additionalProperties": True,
},
"use_defaults": {
"type": "boolean",
"description": "Set to true to run with default values (user must confirm)",
},
"schedule_name": {
"type": "string",
"description": "Name for scheduled execution (triggers scheduling mode)",
},
"cron": {
"type": "string",
"description": "Cron expression (5 fields: min hour day month weekday)",
},
"timezone": {
"type": "string",
"description": "IANA timezone for schedule (default: UTC)",
},
},
"required": ["username_agent_slug"],
}
@property
def requires_auth(self) -> bool:
"""All operations require authentication."""
"""This tool requires authentication."""
return True
async def _execute(
@@ -144,357 +71,186 @@ class RunAgentTool(BaseTool):
session: ChatSession,
**kwargs,
) -> ToolResponseBase:
"""Execute the tool with automatic state detection."""
params = RunAgentInput(**kwargs)
"""Execute an agent manually.
Args:
user_id: Authenticated user ID
session_id: Chat session ID
**kwargs: Execution parameters
Returns:
JSON formatted execution result
"""
assert (
user_id is not None
), "User ID is required to run an agent. Superclass enforces authentication."
session_id = session.session_id
username_agent_slug = kwargs.get("username_agent_slug", "").strip()
inputs = kwargs.get("inputs", {})
# Validate agent slug format
if not params.username_agent_slug or "/" not in params.username_agent_slug:
# Call _execute directly since we're calling internally from another tool
response = await GetRequiredSetupInfoTool()._execute(user_id, session, **kwargs)
if not isinstance(response, SetupRequirementsResponse):
return ErrorResponse(
message="Please provide an agent slug in format 'username/agent-name'",
message="Failed to get required setup information",
session_id=session_id,
)
# Auth is required
if not user_id:
setup_info = SetupInfo.model_validate(response.setup_info)
if not setup_info.user_readiness.ready_to_run:
return ErrorResponse(
message="Authentication required. Please sign in to use this tool.",
message=f"User is not ready to run the agent. User Readiness: {setup_info.user_readiness.model_dump_json()} Requirments: {setup_info.requirements}",
session_id=session_id,
)
# Determine if this is a schedule request
is_schedule = bool(params.schedule_name or params.cron)
# Get the graph using the graph_id and graph_version from the setup response
if not response.graph_id or not response.graph_version:
return ErrorResponse(
message=f"Graph information not available for {username_agent_slug}",
session_id=session_id,
)
try:
# Step 1: Fetch agent details (always happens first)
username, agent_name = params.username_agent_slug.split("/", 1)
graph, store_agent = await fetch_graph_from_store_slug(username, agent_name)
graph = await get_graph(
graph_id=response.graph_id,
version=response.graph_version,
user_id=None, # Public access for store graphs
include_subgraphs=True,
)
if not graph:
return ErrorResponse(
message=f"Agent '{params.username_agent_slug}' not found in marketplace",
session_id=session_id,
if not graph:
return ErrorResponse(
message=f"Graph {username_agent_slug} ({response.graph_id}v{response.graph_version}) not found",
session_id=session_id,
)
if graph and (
session.successful_agent_runs.get(graph.id, 0) >= config.max_agent_runs
):
return ErrorResponse(
message="Maximum number of agent schedules reached. You can't schedule this agent again in this chat session.",
session_id=session.session_id,
)
# Check if we already have a library agent for this graph
existing_library_agent = await library_db.get_library_agent_by_graph_id(
graph_id=graph.id, user_id=user_id
)
if not existing_library_agent:
# Now we need to add the graph to the users library
library_agents: list[library_model.LibraryAgent] = (
await library_db.create_library_agent(
graph=graph,
user_id=user_id,
create_library_agents_for_sub_graphs=False,
)
)
assert len(library_agents) == 1, "Expected 1 library agent to be created"
library_agent = library_agents[0]
else:
library_agent = existing_library_agent
# Build credentials mapping for the graph
graph_credentials_inputs: dict[str, CredentialsMetaInput] = {}
# Get aggregated credentials requirements from the graph
aggregated_creds = graph.aggregate_credentials_inputs()
logger.debug(
f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
)
if aggregated_creds:
# Get all available credentials for the user
creds_manager = IntegrationCredentialsManager()
available_creds = await creds_manager.store.get_all_creds(user_id)
# Track unmatched credentials for error reporting
missing_creds: list[str] = []
# For each required credential field, find a matching user credential
# field_info.provider is a frozenset because aggregate_credentials_inputs()
# combines requirements from multiple nodes. A credential matches if its
# provider is in the set of acceptable providers.
for credential_field_name, (
credential_requirements,
_node_fields,
) in aggregated_creds.items():
# Find first matching credential by provider and type
matching_cred = next(
(
cred
for cred in available_creds
if cred.provider in credential_requirements.provider
and cred.type in credential_requirements.supported_types
),
None,
)
# Step 2: Check credentials
graph_credentials, missing_creds = await match_user_credentials_to_graph(
user_id, graph
)
if matching_cred:
# Use Pydantic validation to ensure type safety
try:
graph_credentials_inputs[credential_field_name] = (
CredentialsMetaInput(
id=matching_cred.id,
provider=matching_cred.provider, # type: ignore
type=matching_cred.type,
title=matching_cred.title,
)
)
except Exception as e:
logger.error(
f"Failed to create CredentialsMetaInput for field '{credential_field_name}': "
f"provider={matching_cred.provider}, type={matching_cred.type}, "
f"credential_id={matching_cred.id}",
exc_info=True,
)
missing_creds.append(
f"{credential_field_name} (validation failed: {e})"
)
else:
missing_creds.append(
f"{credential_field_name} "
f"(requires provider in {list(credential_requirements.provider)}, "
f"type in {list(credential_requirements.supported_types)})"
)
# Fail fast if any required credentials are missing
if missing_creds:
# Return credentials needed response with input data info
# The UI handles credential setup automatically, so the message
# focuses on asking about input data
credentials = extract_credentials_from_schema(
graph.credentials_input_schema
logger.warning(
f"Cannot execute agent - missing credentials: {missing_creds}"
)
missing_creds_check = await check_user_has_required_credentials(
user_id, credentials
)
missing_credentials_dict = {
c.id: c.model_dump() for c in missing_creds_check
}
return SetupRequirementsResponse(
message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
return ErrorResponse(
message=f"Cannot execute agent: missing {len(missing_creds)} required credential(s). You need to call the get_required_setup_info tool to setup the credentials."
f"Please set up the following credentials: {', '.join(missing_creds)}",
session_id=session_id,
setup_info=SetupInfo(
agent_id=graph.id,
agent_name=graph.name,
user_readiness=UserReadiness(
has_all_credentials=False,
missing_credentials=missing_credentials_dict,
ready_to_run=False,
),
requirements={
"credentials": [c.model_dump() for c in credentials],
"inputs": self._get_inputs_list(graph.input_schema),
"execution_modes": self._get_execution_modes(graph),
},
),
graph_id=graph.id,
graph_version=graph.version,
details={"missing_credentials": missing_creds},
)
# Step 3: Check inputs
# Get all available input fields from schema
input_properties = graph.input_schema.get("properties", {})
required_fields = set(graph.input_schema.get("required", []))
provided_inputs = set(params.inputs.keys())
# If agent has inputs but none were provided AND use_defaults is not set,
# always show what's available first so user can decide
if input_properties and not provided_inputs and not params.use_defaults:
credentials = extract_credentials_from_schema(
graph.credentials_input_schema
)
return AgentDetailsResponse(
message=self._build_inputs_message(graph, MSG_ASK_USER_FOR_VALUES),
session_id=session_id,
agent=self._build_agent_details(graph, credentials),
user_authenticated=True,
graph_id=graph.id,
graph_version=graph.version,
)
# Check if required inputs are missing (and not using defaults)
missing_inputs = required_fields - provided_inputs
if missing_inputs and not params.use_defaults:
# Return agent details with missing inputs info
credentials = extract_credentials_from_schema(
graph.credentials_input_schema
)
return AgentDetailsResponse(
message=(
f"Agent '{graph.name}' is missing required inputs: "
f"{', '.join(missing_inputs)}. "
"Please provide these values to run the agent."
),
session_id=session_id,
agent=self._build_agent_details(graph, credentials),
user_authenticated=True,
graph_id=graph.id,
graph_version=graph.version,
)
# Step 4: Execute or Schedule
if is_schedule:
return await self._schedule_agent(
user_id=user_id,
session=session,
graph=graph,
graph_credentials=graph_credentials,
inputs=params.inputs,
schedule_name=params.schedule_name,
cron=params.cron,
timezone=params.timezone,
)
else:
return await self._run_agent(
user_id=user_id,
session=session,
graph=graph,
graph_credentials=graph_credentials,
inputs=params.inputs,
)
except NotFoundError as e:
return ErrorResponse(
message=f"Agent '{params.username_agent_slug}' not found",
error=str(e) if str(e) else "not_found",
session_id=session_id,
)
except DatabaseError as e:
logger.error(f"Database error: {e}", exc_info=True)
return ErrorResponse(
message=f"Failed to process request: {e!s}",
error=str(e),
session_id=session_id,
)
except Exception as e:
logger.error(f"Error processing agent request: {e}", exc_info=True)
return ErrorResponse(
message=f"Failed to process request: {e!s}",
error=str(e),
session_id=session_id,
logger.info(
f"Credential matching complete: {len(graph_credentials_inputs)}/{len(aggregated_creds)} matched"
)
def _get_inputs_list(self, input_schema: dict[str, Any]) -> list[dict[str, Any]]:
"""Extract inputs list from schema."""
inputs_list = []
if isinstance(input_schema, dict) and "properties" in input_schema:
for field_name, field_schema in input_schema["properties"].items():
inputs_list.append(
{
"name": field_name,
"title": field_schema.get("title", field_name),
"type": field_schema.get("type", "string"),
"description": field_schema.get("description", ""),
"required": field_name in input_schema.get("required", []),
}
)
return inputs_list
def _get_execution_modes(self, graph: GraphModel) -> list[str]:
"""Get available execution modes for the graph."""
trigger_info = graph.trigger_setup_info
if trigger_info is None:
return ["manual", "scheduled"]
return ["webhook"]
def _build_inputs_message(
self,
graph: GraphModel,
suffix: str,
) -> str:
"""Build a message describing available inputs for an agent."""
inputs_list = self._get_inputs_list(graph.input_schema)
required_names = [i["name"] for i in inputs_list if i["required"]]
optional_names = [i["name"] for i in inputs_list if not i["required"]]
message_parts = [f"Agent '{graph.name}' accepts the following inputs:"]
if required_names:
message_parts.append(f"Required: {', '.join(required_names)}.")
if optional_names:
message_parts.append(
f"Optional (have defaults): {', '.join(optional_names)}."
)
if not inputs_list:
message_parts = [f"Agent '{graph.name}' has no required inputs."]
message_parts.append(suffix)
return " ".join(message_parts)
def _build_agent_details(
self,
graph: GraphModel,
credentials: list[CredentialsMetaInput],
) -> AgentDetails:
"""Build AgentDetails from a graph."""
trigger_info = (
graph.trigger_setup_info.model_dump() if graph.trigger_setup_info else None
)
return AgentDetails(
id=graph.id,
name=graph.name,
description=graph.description,
inputs=graph.input_schema,
credentials=credentials,
execution_options=ExecutionOptions(
manual=trigger_info is None,
scheduled=trigger_info is None,
webhook=trigger_info is not None,
),
trigger_info=trigger_info,
)
async def _run_agent(
self,
user_id: str,
session: ChatSession,
graph: GraphModel,
graph_credentials: dict[str, CredentialsMetaInput],
inputs: dict[str, Any],
) -> ToolResponseBase:
"""Execute an agent immediately."""
session_id = session.session_id
# Check rate limits
if session.successful_agent_runs.get(graph.id, 0) >= config.max_agent_runs:
return ErrorResponse(
message="Maximum agent runs reached for this session. Please try again later.",
session_id=session_id,
)
# Get or create library agent
library_agent = await get_or_create_library_agent(graph, user_id)
# Execute
# At this point we know the user is ready to run the agent
# So we can execute the agent
execution = await execution_utils.add_graph_execution(
graph_id=library_agent.graph_id,
user_id=user_id,
inputs=inputs,
graph_credentials_inputs=graph_credentials,
graph_credentials_inputs=graph_credentials_inputs,
)
# Track successful run
session.successful_agent_runs[library_agent.graph_id] = (
session.successful_agent_runs.get(library_agent.graph_id, 0) + 1
)
library_agent_link = f"/library/agents/{library_agent.id}"
return ExecutionStartedResponse(
message=(
f"Agent '{library_agent.name}' execution started successfully. "
f"View at {library_agent_link}. "
f"{MSG_DO_NOT_RUN_AGAIN}"
),
message=f"Agent execution successfully started. You can add a link to the agent at: /library/agents/{library_agent.id}. Do not run this tool again unless specifically asked to run the agent again.",
session_id=session_id,
execution_id=execution.id,
graph_id=library_agent.graph_id,
graph_name=library_agent.name,
library_agent_id=library_agent.id,
library_agent_link=library_agent_link,
)
async def _schedule_agent(
self,
user_id: str,
session: ChatSession,
graph: GraphModel,
graph_credentials: dict[str, CredentialsMetaInput],
inputs: dict[str, Any],
schedule_name: str,
cron: str,
timezone: str,
) -> ToolResponseBase:
"""Set up scheduled execution for an agent."""
session_id = session.session_id
# Validate schedule params
if not schedule_name:
return ErrorResponse(
message="schedule_name is required for scheduled execution",
session_id=session_id,
)
if not cron:
return ErrorResponse(
message="cron expression is required for scheduled execution",
session_id=session_id,
)
# Check rate limits
if (
session.successful_agent_schedules.get(graph.id, 0)
>= config.max_agent_schedules
):
return ErrorResponse(
message="Maximum agent schedules reached for this session.",
session_id=session_id,
)
# Get or create library agent
library_agent = await get_or_create_library_agent(graph, user_id)
# Get user timezone
user = await get_user_by_id(user_id)
user_timezone = get_user_timezone_or_utc(user.timezone if user else timezone)
# Create schedule
result = await get_scheduler_client().add_execution_schedule(
user_id=user_id,
graph_id=library_agent.graph_id,
graph_version=library_agent.graph_version,
name=schedule_name,
cron=cron,
input_data=inputs,
input_credentials=graph_credentials,
user_timezone=user_timezone,
)
# Convert next_run_time to user timezone for display
if result.next_run_time:
result.next_run_time = convert_utc_time_to_user_timezone(
result.next_run_time, user_timezone
)
# Track successful schedule
session.successful_agent_schedules[library_agent.graph_id] = (
session.successful_agent_schedules.get(library_agent.graph_id, 0) + 1
)
library_agent_link = f"/library/agents/{library_agent.id}"
return ExecutionStartedResponse(
message=(
f"Agent '{library_agent.name}' scheduled successfully as '{schedule_name}'. "
f"View at {library_agent_link}. "
f"{MSG_DO_NOT_SCHEDULE_AGAIN}"
),
session_id=session_id,
execution_id=result.id,
graph_id=library_agent.graph_id,
graph_name=library_agent.name,
library_agent_id=library_agent.id,
library_agent_link=library_agent_link,
)
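
Taken together, the documented calling contract for the reworked tool is: discover inputs first, then run with all of them. A hedged sketch inside an async context, mirroring the tests below (slug and input names illustrative):

# 1. Discover required inputs first.
details = await GetAgentDetailsTool().execute(
    user_id=user.id,
    session=session,
    tool_call_id=str(uuid.uuid4()),
    username_agent_slug="username/agent-name",
)
# 2. Run with ALL required inputs identified in step 1.
result = await RunAgentTool().execute(
    user_id=user.id,
    session=session,
    tool_call_id=str(uuid.uuid4()),
    username_agent_slug="username/agent-name",
    inputs={"search_query": "user's query", "max_results": 10},
)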

View File

@@ -5,7 +5,6 @@ import pytest
from backend.server.v2.chat.tools._test_data import (
make_session,
setup_firecrawl_test_data,
setup_llm_test_data,
setup_test_data,
)
@@ -14,7 +13,6 @@ from backend.server.v2.chat.tools.run_agent import RunAgentTool
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
setup_firecrawl_test_data = setup_firecrawl_test_data
@pytest.mark.asyncio(scope="session")
@@ -171,221 +169,3 @@ async def test_run_agent_with_llm_credentials(setup_llm_test_data):
assert result_data["graph_id"] == graph.id
assert "graph_name" in result_data
assert result_data["graph_name"] == "LLM Test Agent"
@pytest.mark.asyncio(scope="session")
async def test_run_agent_shows_available_inputs_when_none_provided(setup_test_data):
"""Test that run_agent returns available inputs when called without inputs or use_defaults."""
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
tool = RunAgentTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
# Execute without inputs and without use_defaults
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={},
use_defaults=False,
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should return agent_details type showing available inputs
assert result_data.get("type") == "agent_details"
assert "agent" in result_data
assert "message" in result_data
# Message should mention inputs
assert "inputs" in result_data["message"].lower()
@pytest.mark.asyncio(scope="session")
async def test_run_agent_with_use_defaults(setup_test_data):
"""Test that run_agent executes successfully with use_defaults=True."""
user = setup_test_data["user"]
graph = setup_test_data["graph"]
store_submission = setup_test_data["store_submission"]
tool = RunAgentTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
# Execute with use_defaults=True (no explicit inputs)
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={},
use_defaults=True,
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should execute successfully
assert "execution_id" in result_data
assert result_data["graph_id"] == graph.id
@pytest.mark.asyncio(scope="session")
async def test_run_agent_missing_credentials(setup_firecrawl_test_data):
"""Test that run_agent returns setup_requirements when credentials are missing."""
user = setup_firecrawl_test_data["user"]
store_submission = setup_firecrawl_test_data["store_submission"]
tool = RunAgentTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
# Execute - user doesn't have firecrawl credentials
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={"url": "https://example.com"},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should return setup_requirements type with missing credentials
assert result_data.get("type") == "setup_requirements"
assert "setup_info" in result_data
setup_info = result_data["setup_info"]
assert "user_readiness" in setup_info
assert setup_info["user_readiness"]["has_all_credentials"] is False
assert len(setup_info["user_readiness"]["missing_credentials"]) > 0
@pytest.mark.asyncio(scope="session")
async def test_run_agent_invalid_slug_format(setup_test_data):
"""Test that run_agent returns error for invalid slug format (no slash)."""
user = setup_test_data["user"]
tool = RunAgentTool()
session = make_session(user_id=user.id)
# Execute with invalid slug format
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug="no-slash-here",
inputs={},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should return error
assert result_data.get("type") == "error"
assert "username/agent-name" in result_data["message"]
@pytest.mark.asyncio(scope="session")
async def test_run_agent_unauthenticated():
"""Test that run_agent returns need_login for unauthenticated users."""
tool = RunAgentTool()
session = make_session(user_id=None)
# Execute without user_id
response = await tool.execute(
user_id=None,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug="test/test-agent",
inputs={},
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Base tool returns need_login type for unauthenticated users
assert result_data.get("type") == "need_login"
assert "sign in" in result_data["message"].lower()
@pytest.mark.asyncio(scope="session")
async def test_run_agent_schedule_without_cron(setup_test_data):
"""Test that run_agent returns error when scheduling without cron expression."""
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
tool = RunAgentTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
# Try to schedule without cron
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={"test_input": "test"},
schedule_name="My Schedule",
cron="", # Empty cron
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should return error about missing cron
assert result_data.get("type") == "error"
assert "cron" in result_data["message"].lower()
@pytest.mark.asyncio(scope="session")
async def test_run_agent_schedule_without_name(setup_test_data):
"""Test that run_agent returns error when scheduling without schedule_name."""
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
tool = RunAgentTool()
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
session = make_session(user_id=user.id)
# Try to schedule without schedule_name
response = await tool.execute(
user_id=user.id,
session_id=str(uuid.uuid4()),
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
inputs={"test_input": "test"},
schedule_name="", # Empty name
cron="0 9 * * *",
session=session,
)
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should return error about missing schedule_name
assert result_data.get("type") == "error"
assert "schedule_name" in result_data["message"].lower()

View File

@@ -0,0 +1,395 @@
"""Tool for setting up an agent with credentials and configuration."""
import logging
from typing import Any
from pydantic import BaseModel
from backend.data.graph import get_graph
from backend.data.model import CredentialsMetaInput
from backend.data.user import get_user_by_id
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.chat.config import ChatConfig
from backend.server.v2.chat.model import ChatSession
from backend.server.v2.chat.tools.get_required_setup_info import (
GetRequiredSetupInfoTool,
)
from backend.server.v2.chat.tools.models import (
ExecutionStartedResponse,
SetupInfo,
SetupRequirementsResponse,
)
from backend.server.v2.library import db as library_db
from backend.server.v2.library import model as library_model
from backend.util.clients import get_scheduler_client
from backend.util.timezone_utils import (
convert_utc_time_to_user_timezone,
get_user_timezone_or_utc,
)
from .base import BaseTool
from .models import ErrorResponse, ToolResponseBase
config = ChatConfig()
logger = logging.getLogger(__name__)
class AgentDetails(BaseModel):
graph_name: str
graph_id: str
graph_version: int
recommended_schedule_cron: str | None
required_credentials: dict[str, CredentialsMetaInput]
class SetupAgentTool(BaseTool):
"""Tool for setting up an agent with scheduled execution or webhook triggers."""
@property
def name(self) -> str:
return "schedule_agent"
@property
def description(self) -> str:
return """Set up an agent with credentials and configure it for scheduled execution or webhook triggers.
IMPORTANT: Before calling this tool, you MUST first call get_agent_details to determine what inputs are required.
For SCHEDULED execution:
- Cron format: "minute hour day month weekday" (e.g., "0 9 * * 1-5" = 9am weekdays)
- Common patterns: "0 * * * *" (hourly), "0 0 * * *" (daily at midnight), "0 9 * * 1" (Mondays at 9am)
- Timezone: Use IANA timezone names like "America/New_York", "Europe/London", "Asia/Tokyo"
- The 'inputs' parameter must contain ALL required inputs from get_agent_details as a dictionary
For WEBHOOK triggers:
- The agent will be triggered by external events
- Still requires all input values from get_agent_details"""
@property
def parameters(self) -> dict[str, Any]:
return {
"type": "object",
"properties": {
"username_agent_slug": {
"type": "string",
"description": "The marketplace agent slug (e.g., 'username/agent-name')",
},
"setup_type": {
"type": "string",
"enum": ["schedule", "webhook"],
"description": "Type of setup: 'schedule' for cron, 'webhook' for triggers.",
},
"name": {
"type": "string",
"description": "Name for this setup/schedule (e.g., 'Daily Report', 'Weekly Summary')",
},
"description": {
"type": "string",
"description": "Description of this setup",
},
"cron": {
"type": "string",
"description": "Cron expression (5 fields: minute hour day month weekday). Examples: '0 9 * * 1-5' (9am weekdays), '*/30 * * * *' (every 30 min)",
},
"timezone": {
"type": "string",
"description": "IANA timezone (e.g., 'America/New_York', 'Europe/London', 'UTC'). Defaults to UTC if not specified.",
},
"inputs": {
"type": "object",
"description": 'REQUIRED: Dictionary with ALL required inputs from get_agent_details. Format: {"input_name": value}',
"additionalProperties": True,
},
"webhook_config": {
"type": "object",
"description": "Webhook configuration (required if setup_type is 'webhook')",
"additionalProperties": True,
},
},
"required": ["username_agent_slug", "setup_type"],
}
@property
def requires_auth(self) -> bool:
"""This tool requires authentication."""
return True
async def _execute(
self,
user_id: str | None,
session: ChatSession,
**kwargs,
) -> ToolResponseBase:
"""Set up an agent with configuration.
Args:
user_id: Authenticated user ID
session_id: Chat session ID
**kwargs: Setup parameters
Returns:
JSON formatted setup result
"""
assert (
user_id is not None
), "User ID is required to run an agent. Superclass enforces authentication."
session_id = session.session_id
setup_type = kwargs.get("setup_type", "schedule").strip()
if setup_type != "schedule":
return ErrorResponse(
message="Only schedule setup is supported at this time",
session_id=session_id,
)
else:
cron = kwargs.get("cron", "").strip()
cron_name = kwargs.get("name", "").strip()
if not cron or not cron_name:
return ErrorResponse(
message="Cron and name are required for schedule setup",
session_id=session_id,
)
username_agent_slug = kwargs.get("username_agent_slug", "").strip()
inputs = kwargs.get("inputs", {})
library_agent = await self._get_or_add_library_agent(
username_agent_slug, user_id, session, **kwargs
)
if not isinstance(library_agent, AgentDetails):
# library agent is an ErrorResponse
return library_agent
if (
session.successful_agent_schedules.get(library_agent.graph_id, 0)
>= config.max_agent_schedules
):
return ErrorResponse(
message="Maximum number of agent schedules reached. You can't schedule this agent again in this chat session.",
session_id=session.session_id,
)
# At this point we know the user is ready to run the agent
# Create the schedule for the agent
from backend.server.v2.library import db as library_db
# Get the library agent model for scheduling
lib_agent = await library_db.get_library_agent_by_graph_id(
graph_id=library_agent.graph_id, user_id=user_id
)
if not lib_agent:
return ErrorResponse(
message=f"Library agent not found for graph {library_agent.graph_id}",
session_id=session_id,
)
return await self._add_graph_execution_schedule(
library_agent=lib_agent,
user_id=user_id,
cron=cron,
name=cron_name,
inputs=inputs,
credentials=library_agent.required_credentials,
session=session,
timezone=kwargs.get("timezone"),
)
async def _add_graph_execution_schedule(
self,
library_agent: library_model.LibraryAgent,
user_id: str,
cron: str,
name: str,
inputs: dict[str, Any],
credentials: dict[str, CredentialsMetaInput],
session: ChatSession,
**kwargs,
) -> ExecutionStartedResponse | ErrorResponse:
# Use timezone from the request if provided, otherwise fall back to the
# user's profile timezone (defaulting to UTC)
user = await get_user_by_id(user_id)
user_timezone = kwargs.get("timezone") or get_user_timezone_or_utc(
user.timezone if user else None
)
session_id = session.session_id
# Map required credentials (schema field names) to actual user credential IDs
# credentials param contains CredentialsMetaInput with schema field names as keys
# We need to find the user's actual credentials that match the provider/type
creds_manager = IntegrationCredentialsManager()
user_credentials = await creds_manager.store.get_all_creds(user_id)
# Build a mapping from schema field name -> actual credential ID
resolved_credentials: dict[str, CredentialsMetaInput] = {}
missing_credentials: list[str] = []
for field_name, cred_meta in credentials.items():
# Find a matching credential from the user's credentials
matching_cred = next(
(
c
for c in user_credentials
if c.provider == cred_meta.provider and c.type == cred_meta.type
),
None,
)
if matching_cred:
# Use the actual credential ID instead of the schema field name
# Create a new CredentialsMetaInput with the actual credential ID
# but keep the same provider/type from the original meta
resolved_credentials[field_name] = CredentialsMetaInput(
id=matching_cred.id,
provider=cred_meta.provider,
type=cred_meta.type,
title=cred_meta.title,
)
else:
missing_credentials.append(
f"{cred_meta.title} ({cred_meta.provider}/{cred_meta.type})"
)
if missing_credentials:
return ErrorResponse(
message=(
f"Cannot execute agent: missing {len(missing_credentials)} required "
f"credential(s): {', '.join(missing_credentials)}. "
"Call the get_required_setup_info tool to set up these credentials."
),
session_id=session_id,
)
result = await get_scheduler_client().add_execution_schedule(
user_id=user_id,
graph_id=library_agent.graph_id,
graph_version=library_agent.graph_version,
name=name,
cron=cron,
input_data=inputs,
input_credentials=resolved_credentials,
user_timezone=user_timezone,
)
# Convert the next_run_time back to user timezone for display
if result.next_run_time:
result.next_run_time = convert_utc_time_to_user_timezone(
result.next_run_time, user_timezone
)
session.successful_agent_schedules[library_agent.graph_id] = (
session.successful_agent_schedules.get(library_agent.graph_id, 0) + 1
)
return ExecutionStartedResponse(
message=f"Agent execution successfully scheduled. You can add a link to the agent at: /library/agents/{library_agent.id}. Do not run this tool again unless specifically asked to run the agent again.",
session_id=session_id,
execution_id=result.id,
graph_id=library_agent.graph_id,
graph_name=library_agent.name,
)
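A minimal sketch of the credential-resolution step above, using simplified stand-in types rather than the real CredentialsMetaInput and credential store:

# Hypothetical illustration of matching required credential fields to a user's
# stored credentials by (provider, type); the types here are stand-ins.
from dataclasses import dataclass

@dataclass
class Cred:
    id: str
    provider: str
    type: str

def resolve(
    required: dict[str, Cred], available: list[Cred]
) -> tuple[dict[str, Cred], list[str]]:
    resolved: dict[str, Cred] = {}
    missing: list[str] = []
    for field_name, meta in required.items():
        match = next(
            (c for c in available if c.provider == meta.provider and c.type == meta.type),
            None,
        )
        if match:
            resolved[field_name] = match  # keep the user's actual credential ID
        else:
            missing.append(f"{field_name} ({meta.provider}/{meta.type})")
    return resolved, missing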
async def _get_or_add_library_agent(
self, agent_id: str, user_id: str, session: ChatSession, **kwargs
) -> AgentDetails | ErrorResponse:
# Call _execute directly since we're calling internally from another tool
session_id = session.session_id
response = await GetRequiredSetupInfoTool()._execute(user_id, session, **kwargs)
if not isinstance(response, SetupRequirementsResponse):
return ErrorResponse(
message="Failed to get required setup information",
session_id=session_id,
)
setup_info = SetupInfo.model_validate(response.setup_info)
if not setup_info.user_readiness.ready_to_run:
return ErrorResponse(
message=f"User is not ready to run the agent. User Readiness: {setup_info.user_readiness.model_dump_json()} Requirments: {setup_info.requirements}",
session_id=session_id,
)
# Get the graph using the graph_id and graph_version from the setup response
if not response.graph_id or not response.graph_version:
return ErrorResponse(
message=f"Graph information not available for {agent_id}",
session_id=session_id,
)
graph = await get_graph(
graph_id=response.graph_id,
version=response.graph_version,
user_id=None, # Public access for store graphs
include_subgraphs=True,
)
if not graph:
return ErrorResponse(
message=f"Graph {agent_id} ({response.graph_id}v{response.graph_version}) not found",
session_id=session_id,
)
recommended_schedule_cron = graph.recommended_schedule_cron
# Extract credentials from the JSON schema properties
credentials_input_schema = graph.credentials_input_schema
required_credentials: dict[str, CredentialsMetaInput] = {}
if (
isinstance(credentials_input_schema, dict)
and "properties" in credentials_input_schema
):
for cred_name, cred_schema in credentials_input_schema[
"properties"
].items():
# Get provider from credentials_provider array or properties.provider.const
provider = "unknown"
if (
"credentials_provider" in cred_schema
and cred_schema["credentials_provider"]
):
provider = cred_schema["credentials_provider"][0]
elif (
"properties" in cred_schema
and "provider" in cred_schema["properties"]
):
provider = cred_schema["properties"]["provider"].get(
"const", "unknown"
)
# Get type from credentials_types array or properties.type.const
cred_type = "api_key" # Default
if (
"credentials_types" in cred_schema
and cred_schema["credentials_types"]
):
cred_type = cred_schema["credentials_types"][0]
elif (
"properties" in cred_schema and "type" in cred_schema["properties"]
):
cred_type = cred_schema["properties"]["type"].get(
"const", "api_key"
)
required_credentials[cred_name] = CredentialsMetaInput(
id=cred_name,
title=cred_schema.get("title", cred_name),
provider=provider, # type: ignore
type=cred_type,
)
# Check if we already have a library agent for this graph
existing_library_agent = await library_db.get_library_agent_by_graph_id(
graph_id=graph.id, user_id=user_id
)
if not existing_library_agent:
# Now we need to add the graph to the users library
library_agents: list[library_model.LibraryAgent] = (
await library_db.create_library_agent(
graph=graph,
user_id=user_id,
create_library_agents_for_sub_graphs=False,
)
)
assert len(library_agents) == 1, "Expected 1 library agent to be created"
library_agent = library_agents[0]
else:
library_agent = existing_library_agent
return AgentDetails(
graph_name=graph.name,
graph_id=library_agent.graph_id,
graph_version=library_agent.graph_version,
recommended_schedule_cron=recommended_schedule_cron,
required_credentials=required_credentials,
)
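For reference, a hedged usage sketch of the tool as exercised in the tests below; the slug and inputs are hypothetical and must match what get_agent_details reports for the agent:

# Illustrative only: schedule a marketplace agent for weekdays at 9am New York time.
tool = SetupAgentTool()
response = await tool.execute(
    user_id=user.id,  # assumed authenticated user
    session=session,  # assumed existing ChatSession
    tool_call_id="example-tool-call",  # hypothetical ID
    username_agent_slug="creator/daily-report",  # hypothetical slug
    setup_type="schedule",
    name="Daily Report",
    cron="0 9 * * 1-5",
    timezone="America/New_York",
    inputs={"topic": "sales"},  # must contain ALL required inputs
)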

View File

@@ -0,0 +1,422 @@
import uuid
import orjson
import pytest
from backend.server.v2.chat.tools._test_data import (
make_session,
setup_llm_test_data,
setup_test_data,
)
from backend.server.v2.chat.tools.setup_agent import SetupAgentTool
from backend.util.clients import get_scheduler_client
# This is so the formatter doesn't remove the fixture imports
setup_llm_test_data = setup_llm_test_data
setup_test_data = setup_test_data
@pytest.mark.asyncio(scope="session")
async def test_setup_agent_missing_cron(setup_test_data):
"""Test error when cron is missing for schedule setup"""
# Use test data from fixture
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Execute without cron
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
setup_type="schedule",
inputs={"test_input": "Hello World"},
# Missing: cron and name
)
# Verify error response
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "message" in result_data
assert (
"cron" in result_data["message"].lower()
or "name" in result_data["message"].lower()
)
@pytest.mark.asyncio(scope="session")
async def test_setup_agent_webhook_not_supported(setup_test_data):
"""Test error when webhook setup is attempted"""
# Use test data from fixture
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Execute with webhook setup_type
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
setup_type="webhook",
inputs={"test_input": "Hello World"},
)
# Verify error response
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "message" in result_data
message_lower = result_data["message"].lower()
assert "schedule" in message_lower and "supported" in message_lower
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_schedule_success(setup_test_data):
"""Test successfully setting up an agent with a schedule"""
# Use test data from fixture
user = setup_test_data["user"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Execute with schedule setup
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
setup_type="schedule",
name="Test Schedule",
description="Test schedule description",
cron="0 9 * * *", # Daily at 9am
timezone="UTC",
inputs={"test_input": "Hello World"},
)
# Verify the response
assert response is not None
assert hasattr(response, "result")
# Parse the result JSON
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Check for execution started
assert "message" in result_data
assert "execution_id" in result_data
assert "graph_id" in result_data
assert "graph_name" in result_data
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_with_credentials(setup_llm_test_data):
"""Test setting up an agent that requires credentials"""
# Use test data from fixture (includes OpenAI credentials)
user = setup_llm_test_data["user"]
store_submission = setup_llm_test_data["store_submission"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Execute with schedule setup
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
setup_type="schedule",
name="LLM Schedule",
description="LLM schedule with credentials",
cron="*/30 * * * *", # Every 30 minutes
timezone="America/New_York",
inputs={"user_prompt": "What is 2+2?"},
)
# Verify the response
assert response is not None
assert hasattr(response, "result")
# Parse the result JSON
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
# Should succeed since user has OpenAI credentials
assert "execution_id" in result_data
assert "graph_id" in result_data
@pytest.mark.asyncio(scope="session")
async def test_setup_agent_invalid_agent(setup_test_data):
"""Test error when agent doesn't exist"""
# Use test data from fixture
user = setup_test_data["user"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Execute with non-existent agent
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug="nonexistent/agent",
setup_type="schedule",
name="Test Schedule",
cron="0 9 * * *",
inputs={},
)
# Verify error response
assert response is not None
assert hasattr(response, "result")
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "message" in result_data
# Should fail to find the agent
assert any(
phrase in result_data["message"].lower()
for phrase in ["not found", "failed", "error"]
)
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_schedule_created_in_scheduler(setup_test_data):
"""Test that the schedule is actually created in the scheduler service"""
# Use test data from fixture
user = setup_test_data["user"]
graph = setup_test_data["graph"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Create a unique schedule name to identify this test
schedule_name = f"Test Schedule {uuid.uuid4()}"
# Execute with schedule setup
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
setup_type="schedule",
name=schedule_name,
description="Test schedule to verify credentials",
cron="0 0 * * *", # Daily at midnight
timezone="UTC",
inputs={"test_input": "Scheduled execution"},
)
# Verify the response
assert response is not None
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "execution_id" in result_data
# Now verify the schedule was created in the scheduler service
scheduler = get_scheduler_client()
schedules = await scheduler.get_execution_schedules(graph.id, user.id)
# Find our schedule
our_schedule = None
for schedule in schedules:
if schedule.name == schedule_name:
our_schedule = schedule
break
assert (
our_schedule is not None
), f"Schedule '{schedule_name}' not found in scheduler"
assert our_schedule.cron == "0 0 * * *"
assert our_schedule.graph_id == graph.id
# Clean up: delete the schedule
await scheduler.delete_schedule(our_schedule.id, user_id=user.id)
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_schedule_with_credentials_triggered(setup_llm_test_data):
"""Test that credentials are properly passed when a schedule is triggered"""
# Use test data from fixture (includes OpenAI credentials)
user = setup_llm_test_data["user"]
graph = setup_llm_test_data["graph"]
store_submission = setup_llm_test_data["store_submission"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Create a unique schedule name
schedule_name = f"LLM Test Schedule {uuid.uuid4()}"
# Execute with schedule setup
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
setup_type="schedule",
name=schedule_name,
description="Test LLM schedule with credentials",
cron="* * * * *", # Every minute (for testing)
timezone="UTC",
inputs={"user_prompt": "Test prompt for credentials"},
)
# Verify the response
assert response is not None
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "execution_id" in result_data
# Get the schedule from the scheduler
scheduler = get_scheduler_client()
schedules = await scheduler.get_execution_schedules(graph.id, user.id)
# Find our schedule
our_schedule = None
for schedule in schedules:
if schedule.name == schedule_name:
our_schedule = schedule
break
assert our_schedule is not None, f"Schedule '{schedule_name}' not found"
# Verify the schedule has the correct input data
assert our_schedule.input_data is not None
assert "user_prompt" in our_schedule.input_data
assert our_schedule.input_data["user_prompt"] == "Test prompt for credentials"
# Verify credentials are stored in the schedule
# The credentials should be stored as input_credentials
assert our_schedule.input_credentials is not None
# The credentials should contain the OpenAI provider credential
# Note: The exact structure depends on how credentials are serialized
# We're checking that credentials data exists and has the right provider
if our_schedule.input_credentials:
# Convert to dict if needed
creds_dict = (
our_schedule.input_credentials
if isinstance(our_schedule.input_credentials, dict)
else {}
)
# Check if any credential has openai provider
has_openai_cred = False
for cred_key, cred_value in creds_dict.items():
if isinstance(cred_value, dict):
if cred_value.get("provider") == "openai":
has_openai_cred = True
# Verify the credential has the expected structure
assert "id" in cred_value or "api_key" in cred_value
break
# If we have LLM block, we should have stored credentials
assert has_openai_cred, "OpenAI credentials not found in schedule"
# Clean up: delete the schedule
await scheduler.delete_schedule(our_schedule.id, user_id=user.id)
@pytest.mark.asyncio(scope="session")
@pytest.mark.skip(reason="Requires scheduler service to be running")
async def test_setup_agent_creates_library_agent(setup_test_data):
"""Test that setup creates a library agent for the user"""
# Use test data from fixture
user = setup_test_data["user"]
graph = setup_test_data["graph"]
store_submission = setup_test_data["store_submission"]
# Create the tool instance
tool = SetupAgentTool()
# Build the session
session = make_session(user_id=user.id)
# Build the proper marketplace agent_id format
agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}"
# Execute with schedule setup
response = await tool.execute(
user_id=user.id,
session=session,
tool_call_id=str(uuid.uuid4()),
username_agent_slug=agent_marketplace_id,
setup_type="schedule",
name="Library Test Schedule",
cron="0 12 * * *", # Daily at noon
inputs={"test_input": "Library test"},
)
# Verify the response
assert response is not None
assert isinstance(response.result, str)
result_data = orjson.loads(response.result)
assert "graph_id" in result_data
assert result_data["graph_id"] == graph.id
# Verify library agent was created
from backend.server.v2.library import db as library_db
library_agent = await library_db.get_library_agent_by_graph_id(
graph_id=graph.id, user_id=user.id
)
assert library_agent is not None
assert library_agent.graph_id == graph.id
assert library_agent.name == "Test Agent"

View File

@@ -1,288 +0,0 @@
"""Shared utilities for chat tools."""
import logging
from typing import Any
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.server.v2.library import db as library_db
from backend.server.v2.library import model as library_model
from backend.server.v2.store import db as store_db
from backend.util.exceptions import NotFoundError
logger = logging.getLogger(__name__)
async def fetch_graph_from_store_slug(
username: str,
agent_name: str,
) -> tuple[GraphModel | None, Any | None]:
"""
Fetch graph from store by username/agent_name slug.
Args:
username: Creator's username
agent_name: Agent name/slug
Returns:
tuple[Graph | None, StoreAgentDetails | None]: The graph and store agent details,
or (None, None) if not found.
Raises:
DatabaseError: If there's a database error during lookup.
"""
try:
store_agent = await store_db.get_store_agent_details(username, agent_name)
except NotFoundError:
return None, None
# Get the graph from store listing version
graph_meta = await store_db.get_available_graph(
store_agent.store_listing_version_id
)
graph = await graph_db.get_graph(
graph_id=graph_meta.id,
version=graph_meta.version,
user_id=None, # Public access
include_subgraphs=True,
)
return graph, store_agent
def extract_credentials_from_schema(
credentials_input_schema: dict[str, Any] | None,
) -> list[CredentialsMetaInput]:
"""
Extract credential requirements from graph's credentials_input_schema.
This consolidates duplicated logic from get_agent_details.py and setup_agent.py.
Args:
credentials_input_schema: The credentials_input_schema from a Graph object
Returns:
List of CredentialsMetaInput with provider and type info
"""
credentials: list[CredentialsMetaInput] = []
if (
not isinstance(credentials_input_schema, dict)
or "properties" not in credentials_input_schema
):
return credentials
for cred_name, cred_schema in credentials_input_schema["properties"].items():
provider = _extract_provider_from_schema(cred_schema)
cred_type = _extract_credential_type_from_schema(cred_schema)
credentials.append(
CredentialsMetaInput(
id=cred_name,
title=cred_schema.get("title", cred_name),
provider=provider, # type: ignore
type=cred_type, # type: ignore
)
)
return credentials
def extract_credentials_as_dict(
credentials_input_schema: dict[str, Any] | None,
) -> dict[str, CredentialsMetaInput]:
"""
Extract credential requirements as a dict keyed by field name.
Args:
credentials_input_schema: The credentials_input_schema from a Graph object
Returns:
Dict mapping field name to CredentialsMetaInput
"""
credentials: dict[str, CredentialsMetaInput] = {}
if (
not isinstance(credentials_input_schema, dict)
or "properties" not in credentials_input_schema
):
return credentials
for cred_name, cred_schema in credentials_input_schema["properties"].items():
provider = _extract_provider_from_schema(cred_schema)
cred_type = _extract_credential_type_from_schema(cred_schema)
credentials[cred_name] = CredentialsMetaInput(
id=cred_name,
title=cred_schema.get("title", cred_name),
provider=provider, # type: ignore
type=cred_type, # type: ignore
)
return credentials
def _extract_provider_from_schema(cred_schema: dict[str, Any]) -> str:
"""Extract provider from credential schema."""
if "credentials_provider" in cred_schema and cred_schema["credentials_provider"]:
return cred_schema["credentials_provider"][0]
if "properties" in cred_schema and "provider" in cred_schema["properties"]:
return cred_schema["properties"]["provider"].get("const", "unknown")
return "unknown"
def _extract_credential_type_from_schema(cred_schema: dict[str, Any]) -> str:
"""Extract credential type from credential schema."""
if "credentials_types" in cred_schema and cred_schema["credentials_types"]:
return cred_schema["credentials_types"][0]
if "properties" in cred_schema and "type" in cred_schema["properties"]:
return cred_schema["properties"]["type"].get("const", "api_key")
return "api_key"
async def get_or_create_library_agent(
graph: GraphModel,
user_id: str,
) -> library_model.LibraryAgent:
"""
Get existing library agent or create new one.
This consolidates duplicated logic from run_agent.py and setup_agent.py.
Args:
graph: The Graph to add to library
user_id: The user's ID
Returns:
LibraryAgent instance
"""
existing = await library_db.get_library_agent_by_graph_id(
graph_id=graph.id, user_id=user_id
)
if existing:
return existing
library_agents = await library_db.create_library_agent(
graph=graph,
user_id=user_id,
create_library_agents_for_sub_graphs=False,
)
assert len(library_agents) == 1, "Expected 1 library agent to be created"
return library_agents[0]
async def match_user_credentials_to_graph(
user_id: str,
graph: GraphModel,
) -> tuple[dict[str, CredentialsMetaInput], list[str]]:
"""
Match user's available credentials against graph's required credentials.
Uses graph.aggregate_credentials_inputs() which handles credentials from
multiple nodes and uses frozensets for provider matching.
Args:
user_id: The user's ID
graph: The Graph with credential requirements
Returns:
tuple[matched_credentials dict, missing_credential_descriptions list]
"""
graph_credentials_inputs: dict[str, CredentialsMetaInput] = {}
missing_creds: list[str] = []
# Get aggregated credentials requirements from the graph
aggregated_creds = graph.aggregate_credentials_inputs()
logger.debug(
f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
)
if not aggregated_creds:
return graph_credentials_inputs, missing_creds
# Get all available credentials for the user
creds_manager = IntegrationCredentialsManager()
available_creds = await creds_manager.store.get_all_creds(user_id)
# For each required credential field, find a matching user credential
# credential_requirements.provider is a frozenset because aggregate_credentials_inputs()
# combines requirements from multiple nodes. A credential matches if its
# provider is in the set of acceptable providers.
for credential_field_name, (
credential_requirements,
_node_fields,
) in aggregated_creds.items():
# Find first matching credential by provider and type
matching_cred = next(
(
cred
for cred in available_creds
if cred.provider in credential_requirements.provider
and cred.type in credential_requirements.supported_types
),
None,
)
if matching_cred:
try:
graph_credentials_inputs[credential_field_name] = CredentialsMetaInput(
id=matching_cred.id,
provider=matching_cred.provider, # type: ignore
type=matching_cred.type,
title=matching_cred.title,
)
except Exception as e:
logger.error(
f"Failed to create CredentialsMetaInput for field '{credential_field_name}': "
f"provider={matching_cred.provider}, type={matching_cred.type}, "
f"credential_id={matching_cred.id}",
exc_info=True,
)
missing_creds.append(
f"{credential_field_name} (validation failed: {e})"
)
else:
missing_creds.append(
f"{credential_field_name} "
f"(requires provider in {list(credential_requirements.provider)}, "
f"type in {list(credential_requirements.supported_types)})"
)
logger.info(
f"Credential matching complete: {len(graph_credentials_inputs)}/{len(aggregated_creds)} matched"
)
return graph_credentials_inputs, missing_creds
async def check_user_has_required_credentials(
user_id: str,
required_credentials: list[CredentialsMetaInput],
) -> list[CredentialsMetaInput]:
"""
Check which required credentials the user is missing.
Args:
user_id: The user's ID
required_credentials: List of required credentials
Returns:
List of missing credentials (empty if user has all)
"""
if not required_credentials:
return []
creds_manager = IntegrationCredentialsManager()
available_creds = await creds_manager.store.get_all_creds(user_id)
missing: list[CredentialsMetaInput] = []
for required in required_credentials:
has_matching = any(
cred.provider == required.provider and cred.type == required.type
for cred in available_creds
)
if not has_matching:
missing.append(required)
return missing
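A sketch of how a caller might chain the helpers above, assuming graph and user_id already exist:

# Illustrative flow, not production code:
matched, missing = await match_user_credentials_to_graph(user_id, graph)
if missing:
    # Surface the missing credential descriptions before scheduling anything
    raise RuntimeError(f"Missing credentials: {missing}")
library_agent = await get_or_create_library_agent(graph, user_id)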

View File

@@ -1,204 +0,0 @@
import json
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Union
from prisma.enums import ReviewStatus
from pydantic import BaseModel, Field, field_validator, model_validator
if TYPE_CHECKING:
from prisma.models import PendingHumanReview
# SafeJson-compatible type alias for review data
SafeJsonData = Union[Dict[str, Any], List[Any], str, int, float, bool, None]
class PendingHumanReviewModel(BaseModel):
"""Response model for pending human review data.
Represents a human review request that is awaiting user action.
Contains all necessary information for a user to review and approve
or reject data from a Human-in-the-Loop block execution.
Attributes:
node_exec_id: ID of the node execution that created this review (primary key)
user_id: ID of the user who must perform the review
graph_exec_id: ID of the graph execution containing the node
graph_id: ID of the graph template being executed
graph_version: Version number of the graph template
payload: The actual data payload awaiting review
instructions: Instructions or message for the reviewer
editable: Whether the reviewer can edit the data
status: Current review status (WAITING, APPROVED, or REJECTED)
review_message: Optional message from the reviewer
was_edited: Whether the data was modified during review
processed: Whether the review result has been processed by the execution engine
created_at: Timestamp when review was created
updated_at: Timestamp when review was last modified
reviewed_at: Timestamp when review was completed (if applicable)
"""
node_exec_id: str = Field(description="Node execution ID (primary key)")
user_id: str = Field(description="User ID associated with the review")
graph_exec_id: str = Field(description="Graph execution ID")
graph_id: str = Field(description="Graph ID")
graph_version: int = Field(description="Graph version")
payload: SafeJsonData = Field(description="The actual data payload awaiting review")
instructions: str | None = Field(
description="Instructions or message for the reviewer", default=None
)
editable: bool = Field(description="Whether the reviewer can edit the data")
status: ReviewStatus = Field(description="Review status")
review_message: str | None = Field(
description="Optional message from the reviewer", default=None
)
was_edited: bool | None = Field(
description="Whether the data was modified during review", default=None
)
processed: bool = Field(
description="Whether the review result has been processed by the execution engine",
default=False,
)
created_at: datetime = Field(description="When the review was created")
updated_at: datetime | None = Field(
description="When the review was last updated", default=None
)
reviewed_at: datetime | None = Field(
description="When the review was completed", default=None
)
@classmethod
def from_db(cls, review: "PendingHumanReview") -> "PendingHumanReviewModel":
"""
Convert a database model to a response model.
Uses the flat database structure with separate columns for
payload, instructions, and the editable flag.
"""
return cls(
node_exec_id=review.nodeExecId,
user_id=review.userId,
graph_exec_id=review.graphExecId,
graph_id=review.graphId,
graph_version=review.graphVersion,
payload=review.payload,
instructions=review.instructions,
editable=review.editable,
status=review.status,
review_message=review.reviewMessage,
was_edited=review.wasEdited,
processed=review.processed,
created_at=review.createdAt,
updated_at=review.updatedAt,
reviewed_at=review.reviewedAt,
)
class ReviewItem(BaseModel):
"""Single review item for processing."""
node_exec_id: str = Field(description="Node execution ID to review")
approved: bool = Field(
description="Whether this review is approved (True) or rejected (False)"
)
message: str | None = Field(
None, description="Optional review message", max_length=2000
)
reviewed_data: SafeJsonData | None = Field(
None, description="Optional edited data (ignored if approved=False)"
)
@field_validator("reviewed_data")
@classmethod
def validate_reviewed_data(cls, v):
"""Validate that reviewed_data is safe and properly structured."""
if v is None:
return v
# Validate SafeJson compatibility
def validate_safejson_type(obj):
"""Ensure object only contains SafeJson compatible types."""
if obj is None:
return True
elif isinstance(obj, (str, int, float, bool)):
return True
elif isinstance(obj, dict):
return all(
isinstance(k, str) and validate_safejson_type(v)
for k, v in obj.items()
)
elif isinstance(obj, list):
return all(validate_safejson_type(item) for item in obj)
else:
return False
if not validate_safejson_type(v):
raise ValueError("reviewed_data contains non-SafeJson compatible types")
# Validate data size to prevent DoS attacks
try:
json_str = json.dumps(v)
if len(json_str) > 1000000: # 1MB limit
raise ValueError("reviewed_data is too large (max 1MB)")
except (TypeError, ValueError) as e:
raise ValueError(f"reviewed_data must be JSON serializable: {str(e)}")
# Ensure no dangerous nested structures (prevent infinite recursion)
def check_depth(obj, max_depth=10, current_depth=0):
"""Recursively check object nesting depth to prevent stack overflow attacks."""
if current_depth > max_depth:
raise ValueError("reviewed_data has excessive nesting depth")
if isinstance(obj, dict):
for value in obj.values():
check_depth(value, max_depth, current_depth + 1)
elif isinstance(obj, list):
for item in obj:
check_depth(item, max_depth, current_depth + 1)
check_depth(v)
return v
@field_validator("message")
@classmethod
def validate_message(cls, v):
"""Validate and sanitize review message."""
if v is not None and len(v.strip()) == 0:
return None
return v
class ReviewRequest(BaseModel):
"""Request model for processing ALL pending reviews for an execution.
This request must include ALL pending reviews for a graph execution.
Each review will be either approved (with optional data modifications)
or rejected (data ignored). The execution will resume only after ALL reviews are processed.
"""
reviews: List[ReviewItem] = Field(
description="All reviews with their approval status, data, and messages"
)
@model_validator(mode="after")
def validate_review_completeness(self):
"""Validate that we have at least one review to process and no duplicates."""
if not self.reviews:
raise ValueError("At least one review must be provided")
# Ensure no duplicate node_exec_ids
node_ids = [review.node_exec_id for review in self.reviews]
if len(node_ids) != len(set(node_ids)):
duplicates = [nid for nid in set(node_ids) if node_ids.count(nid) > 1]
raise ValueError(f"Duplicate review IDs found: {', '.join(duplicates)}")
return self
class ReviewResponse(BaseModel):
"""Response from review endpoint."""
approved_count: int = Field(description="Number of reviews successfully approved")
rejected_count: int = Field(description="Number of reviews successfully rejected")
failed_count: int = Field(description="Number of reviews that failed processing")
error: str | None = Field(None, description="Error message if operation failed")
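For reference, a minimal request body that satisfies ReviewRequest and its validators; the node execution IDs are hypothetical:

# One approval with edited data and one rejection:
example_request = {
    "reviews": [
        {
            "node_exec_id": "node-123",
            "approved": True,
            "message": "Looks good",
            "reviewed_data": {"value": 42},
        },
        {"node_exec_id": "node-456", "approved": False, "message": None},
    ]
}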

View File

@@ -1,492 +0,0 @@
import datetime
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from prisma.enums import ReviewStatus
from pytest_snapshot.plugin import Snapshot
from backend.server.rest_api import handle_internal_http_error
from backend.server.v2.executions.review.model import PendingHumanReviewModel
from backend.server.v2.executions.review.routes import router
# Using a fixed timestamp for reproducible tests
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
app = fastapi.FastAPI()
app.include_router(router, prefix="/api/review")
app.add_exception_handler(ValueError, handle_internal_http_error(400))
client = fastapi.testclient.TestClient(app)
@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
"""Setup auth overrides for all tests in this module"""
from autogpt_libs.auth.jwt_utils import get_jwt_payload
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
yield
app.dependency_overrides.clear()
@pytest.fixture
def sample_pending_review(test_user_id: str) -> PendingHumanReviewModel:
"""Create a sample pending review for testing"""
return PendingHumanReviewModel(
node_exec_id="test_node_123",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "test payload", "value": 42},
instructions="Please review this data",
editable=True,
status=ReviewStatus.WAITING,
review_message=None,
was_edited=None,
processed=False,
created_at=FIXED_NOW,
updated_at=None,
reviewed_at=None,
)
def test_get_pending_reviews_empty(
mocker: pytest_mock.MockFixture,
snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test getting pending reviews when none exist"""
mock_get_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_user"
)
mock_get_reviews.return_value = []
response = client.get("/api/review/pending")
assert response.status_code == 200
assert response.json() == []
mock_get_reviews.assert_called_once_with(test_user_id, 1, 25)
def test_get_pending_reviews_with_data(
mocker: pytest_mock.MockFixture,
sample_pending_review: PendingHumanReviewModel,
snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test getting pending reviews with data"""
mock_get_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_user"
)
mock_get_reviews.return_value = [sample_pending_review]
response = client.get("/api/review/pending?page=2&page_size=10")
assert response.status_code == 200
data = response.json()
assert len(data) == 1
assert data[0]["node_exec_id"] == "test_node_123"
assert data[0]["status"] == "WAITING"
mock_get_reviews.assert_called_once_with(test_user_id, 2, 10)
def test_get_pending_reviews_for_execution_success(
mocker: pytest_mock.MockFixture,
sample_pending_review: PendingHumanReviewModel,
snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test getting pending reviews for specific execution"""
mock_get_graph_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_graph_execution_meta"
)
mock_get_graph_execution.return_value = {
"id": "test_graph_exec_456",
"user_id": test_user_id,
}
mock_get_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
)
mock_get_reviews.return_value = [sample_pending_review]
response = client.get("/api/review/execution/test_graph_exec_456")
assert response.status_code == 200
data = response.json()
assert len(data) == 1
assert data[0]["graph_exec_id"] == "test_graph_exec_456"
def test_get_pending_reviews_for_execution_access_denied(
mocker: pytest_mock.MockFixture,
test_user_id: str,
) -> None:
"""Test access denied when user doesn't own the execution"""
mock_get_graph_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_graph_execution_meta"
)
mock_get_graph_execution.return_value = None
response = client.get("/api/review/execution/test_graph_exec_456")
assert response.status_code == 403
assert "Access denied" in response.json()["detail"]
def test_process_review_action_approve_success(
mocker: pytest_mock.MockFixture,
sample_pending_review: PendingHumanReviewModel,
test_user_id: str,
) -> None:
"""Test successful review approval"""
# Mock the route functions
mock_get_reviews_for_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
)
mock_get_reviews_for_execution.return_value = [sample_pending_review]
mock_process_all_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
)
# Create approved review for return
approved_review = PendingHumanReviewModel(
node_exec_id="test_node_123",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "modified payload", "value": 50},
instructions="Please review this data",
editable=True,
status=ReviewStatus.APPROVED,
review_message="Looks good",
was_edited=True,
processed=False,
created_at=FIXED_NOW,
updated_at=FIXED_NOW,
reviewed_at=FIXED_NOW,
)
mock_process_all_reviews.return_value = {"test_node_123": approved_review}
mock_has_pending = mocker.patch(
"backend.server.v2.executions.review.routes.has_pending_reviews_for_graph_exec"
)
mock_has_pending.return_value = False
mocker.patch("backend.server.v2.executions.review.routes.add_graph_execution")
request_data = {
"reviews": [
{
"node_exec_id": "test_node_123",
"approved": True,
"message": "Looks good",
"reviewed_data": {"data": "modified payload", "value": 50},
}
]
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 200
data = response.json()
assert data["approved_count"] == 1
assert data["rejected_count"] == 0
assert data["failed_count"] == 0
assert data["error"] is None
def test_process_review_action_reject_success(
mocker: pytest_mock.MockFixture,
sample_pending_review: PendingHumanReviewModel,
test_user_id: str,
) -> None:
"""Test successful review rejection"""
# Mock the route functions
mock_get_reviews_for_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
)
mock_get_reviews_for_execution.return_value = [sample_pending_review]
mock_process_all_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
)
rejected_review = PendingHumanReviewModel(
node_exec_id="test_node_123",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "test payload"},
instructions="Please review",
editable=True,
status=ReviewStatus.REJECTED,
review_message="Rejected by user",
was_edited=False,
processed=False,
created_at=FIXED_NOW,
updated_at=None,
reviewed_at=FIXED_NOW,
)
mock_process_all_reviews.return_value = {"test_node_123": rejected_review}
mock_has_pending = mocker.patch(
"backend.server.v2.executions.review.routes.has_pending_reviews_for_graph_exec"
)
mock_has_pending.return_value = False
request_data = {
"reviews": [
{
"node_exec_id": "test_node_123",
"approved": False,
"message": None,
}
]
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 200
data = response.json()
assert data["approved_count"] == 0
assert data["rejected_count"] == 1
assert data["failed_count"] == 0
assert data["error"] is None
def test_process_review_action_mixed_success(
mocker: pytest_mock.MockFixture,
sample_pending_review: PendingHumanReviewModel,
test_user_id: str,
) -> None:
"""Test mixed approve/reject operations"""
# Create a second review
second_review = PendingHumanReviewModel(
node_exec_id="test_node_456",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "second payload"},
instructions="Second review",
editable=False,
status=ReviewStatus.WAITING,
review_message=None,
was_edited=None,
processed=False,
created_at=FIXED_NOW,
updated_at=None,
reviewed_at=None,
)
# Mock the route functions
mock_get_reviews_for_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
)
mock_get_reviews_for_execution.return_value = [sample_pending_review, second_review]
mock_process_all_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
)
# Create approved version of first review
approved_review = PendingHumanReviewModel(
node_exec_id="test_node_123",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "modified"},
instructions="Please review",
editable=True,
status=ReviewStatus.APPROVED,
review_message="Approved",
was_edited=True,
processed=False,
created_at=FIXED_NOW,
updated_at=None,
reviewed_at=FIXED_NOW,
)
# Create rejected version of second review
rejected_review = PendingHumanReviewModel(
node_exec_id="test_node_456",
user_id=test_user_id,
graph_exec_id="test_graph_exec_456",
graph_id="test_graph_789",
graph_version=1,
payload={"data": "second payload"},
instructions="Second review",
editable=False,
status=ReviewStatus.REJECTED,
review_message="Rejected by user",
was_edited=False,
processed=False,
created_at=FIXED_NOW,
updated_at=None,
reviewed_at=FIXED_NOW,
)
mock_process_all_reviews.return_value = {
"test_node_123": approved_review,
"test_node_456": rejected_review,
}
mock_has_pending = mocker.patch(
"backend.server.v2.executions.review.routes.has_pending_reviews_for_graph_exec"
)
mock_has_pending.return_value = False
request_data = {
"reviews": [
{
"node_exec_id": "test_node_123",
"approved": True,
"message": "Approved",
"reviewed_data": {"data": "modified"},
},
{
"node_exec_id": "test_node_456",
"approved": False,
"message": None,
},
]
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 200
data = response.json()
assert data["approved_count"] == 1
assert data["rejected_count"] == 1
assert data["failed_count"] == 0
assert data["error"] is None
def test_process_review_action_empty_request(
mocker: pytest_mock.MockFixture,
test_user_id: str,
) -> None:
"""Test error when no reviews provided"""
request_data = {"reviews": []}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 422
response_data = response.json()
# Pydantic validation error format
assert isinstance(response_data["detail"], list)
assert len(response_data["detail"]) > 0
assert "At least one review must be provided" in response_data["detail"][0]["msg"]
def test_process_review_action_review_not_found(
mocker: pytest_mock.MockFixture,
test_user_id: str,
) -> None:
"""Test error when review is not found"""
# Mock the functions that extract graph execution ID from the request
mock_get_reviews_for_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
)
mock_get_reviews_for_execution.return_value = [] # No reviews found
# Mock process_all_reviews to simulate not finding reviews
mock_process_all_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
)
# This should raise a ValueError with "Reviews not found" message based on the data/human_review.py logic
mock_process_all_reviews.side_effect = ValueError(
"Reviews not found or access denied for IDs: nonexistent_node"
)
request_data = {
"reviews": [
{
"node_exec_id": "nonexistent_node",
"approved": True,
"message": "Test",
}
]
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 400
assert "Reviews not found" in response.json()["detail"]
def test_process_review_action_partial_failure(
mocker: pytest_mock.MockFixture,
sample_pending_review: PendingHumanReviewModel,
test_user_id: str,
) -> None:
"""Test handling of partial failures in review processing"""
# Mock the route functions
mock_get_reviews_for_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
)
mock_get_reviews_for_execution.return_value = [sample_pending_review]
# Mock partial failure in processing
mock_process_all_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
)
mock_process_all_reviews.side_effect = ValueError("Some reviews failed validation")
request_data = {
"reviews": [
{
"node_exec_id": "test_node_123",
"approved": True,
"message": "Test",
}
]
}
response = client.post("/api/review/action", json=request_data)
assert response.status_code == 400
assert "Some reviews failed validation" in response.json()["detail"]
def test_process_review_action_invalid_node_exec_id(
mocker: pytest_mock.MockFixture,
sample_pending_review: PendingHumanReviewModel,
test_user_id: str,
) -> None:
"""Test failure when trying to process review with invalid node execution ID"""
# Mock the route functions
mock_get_reviews_for_execution = mocker.patch(
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
)
mock_get_reviews_for_execution.return_value = [sample_pending_review]
# Mock validation failure - this should return 400, not 500
mock_process_all_reviews = mocker.patch(
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
)
mock_process_all_reviews.side_effect = ValueError(
"Invalid node execution ID format"
)
request_data = {
"reviews": [
{
"node_exec_id": "invalid-node-format",
"approved": True,
"message": "Test",
}
]
}
response = client.post("/api/review/action", json=request_data)
# Should be a 400 Bad Request, not 500 Internal Server Error
assert response.status_code == 400
assert "Invalid node execution ID format" in response.json()["detail"]

View File

@@ -1,194 +0,0 @@
import logging
from typing import List
import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, HTTPException, Query, Security, status
from prisma.enums import ReviewStatus
from backend.data.execution import get_graph_execution_meta
from backend.data.human_review import (
get_pending_reviews_for_execution,
get_pending_reviews_for_user,
has_pending_reviews_for_graph_exec,
process_all_reviews_for_execution,
)
from backend.executor.utils import add_graph_execution
from backend.server.v2.executions.review.model import (
PendingHumanReviewModel,
ReviewRequest,
ReviewResponse,
)
logger = logging.getLogger(__name__)
router = APIRouter(
tags=["executions", "review", "private"],
dependencies=[Security(autogpt_auth_lib.requires_user)],
)
@router.get(
"/pending",
summary="Get Pending Reviews",
response_model=List[PendingHumanReviewModel],
responses={
200: {"description": "List of pending reviews"},
500: {"description": "Server error", "content": {"application/json": {}}},
},
)
async def list_pending_reviews(
user_id: str = Security(autogpt_auth_lib.get_user_id),
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(25, ge=1, le=100, description="Number of reviews per page"),
) -> List[PendingHumanReviewModel]:
"""Get all pending reviews for the current user.
Retrieves all reviews with status "WAITING" that belong to the authenticated user.
Results are ordered by creation time (newest first).
Args:
user_id: Authenticated user ID from security dependency
page: Page number (1-indexed)
page_size: Number of reviews per page
Returns:
List of pending review objects with status converted to typed literals
Raises:
HTTPException: If authentication fails or database error occurs
Note:
Reviews with invalid status values are logged as warnings but excluded
from results rather than failing the entire request.
"""
return await get_pending_reviews_for_user(user_id, page, page_size)
@router.get(
"/execution/{graph_exec_id}",
summary="Get Pending Reviews for Execution",
response_model=List[PendingHumanReviewModel],
responses={
200: {"description": "List of pending reviews for the execution"},
400: {"description": "Invalid graph execution ID"},
403: {"description": "Access denied to graph execution"},
500: {"description": "Server error", "content": {"application/json": {}}},
},
)
async def list_pending_reviews_for_execution(
graph_exec_id: str,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> List[PendingHumanReviewModel]:
"""Get all pending reviews for a specific graph execution.
Retrieves all reviews with status "WAITING" for the specified graph execution
that belong to the authenticated user. Results are ordered by creation time
(oldest first) to preserve review order within the execution.
Args:
graph_exec_id: ID of the graph execution to get reviews for
user_id: Authenticated user ID from security dependency
Returns:
List of pending review objects for the specified execution
Raises:
HTTPException:
- 403: If user doesn't own the graph execution
- 500: If authentication fails or database error occurs
Note:
Only returns reviews owned by the authenticated user for security.
Reviews with invalid status are excluded with warning logs.
"""
# Verify user owns the graph execution before returning reviews
graph_exec = await get_graph_execution_meta(
user_id=user_id, execution_id=graph_exec_id
)
if not graph_exec:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Access denied to graph execution",
)
return await get_pending_reviews_for_execution(graph_exec_id, user_id)
@router.post("/action", response_model=ReviewResponse)
async def process_review_action(
request: ReviewRequest,
user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> ReviewResponse:
"""Process reviews with approve or reject actions."""
# Collect all node exec IDs from the request
all_request_node_ids = {review.node_exec_id for review in request.reviews}
if not all_request_node_ids:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="At least one review must be provided",
)
# Build review decisions map
review_decisions = {}
for review in request.reviews:
if review.approved:
review_decisions[review.node_exec_id] = (
ReviewStatus.APPROVED,
review.reviewed_data,
review.message,
)
else:
review_decisions[review.node_exec_id] = (
ReviewStatus.REJECTED,
None,
review.message,
)
# Process all reviews
updated_reviews = await process_all_reviews_for_execution(
user_id=user_id,
review_decisions=review_decisions,
)
# Count results
approved_count = sum(
1
for review in updated_reviews.values()
if review.status == ReviewStatus.APPROVED
)
rejected_count = sum(
1
for review in updated_reviews.values()
if review.status == ReviewStatus.REJECTED
)
# Resume execution if we processed some reviews
if updated_reviews:
# Get graph execution ID from any processed review
first_review = next(iter(updated_reviews.values()))
graph_exec_id = first_review.graph_exec_id
# Check if any pending reviews remain for this execution
still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id)
if not still_has_pending:
# Resume execution
try:
await add_graph_execution(
graph_id=first_review.graph_id,
user_id=user_id,
graph_exec_id=graph_exec_id,
)
logger.info(f"Resumed execution {graph_exec_id}")
except Exception as e:
logger.error(f"Failed to resume execution {graph_exec_id}: {str(e)}")
return ReviewResponse(
approved_count=approved_count,
rejected_count=rejected_count,
failed_count=0,
error=None,
)
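A hedged client-side sketch of driving these endpoints, assuming the router is mounted under /api/review and a bearer token is available:

# Illustrative httpx usage; base URL, token, and IDs are assumptions.
import httpx

async def approve_all(base_url: str, token: str, graph_exec_id: str) -> None:
    headers = {"Authorization": f"Bearer {token}"}
    async with httpx.AsyncClient(base_url=base_url, headers=headers) as client:
        pending = (await client.get(f"/api/review/execution/{graph_exec_id}")).json()
        body = {
            "reviews": [
                {"node_exec_id": r["node_exec_id"], "approved": True, "message": None}
                for r in pending
            ]
        }
        resp = await client.post("/api/review/action", json=body)
        resp.raise_for_status()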

View File

@@ -17,7 +17,6 @@ import backend.server.v2.store.media as store_media
from backend.data.block import BlockInput
from backend.data.db import transaction
from backend.data.execution import get_graph_execution
from backend.data.graph import GraphSettings
from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
@@ -263,30 +262,6 @@ async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent
if not library_agent:
raise NotFoundError(f"Library agent #{id} not found")
# Fetch marketplace listing if the agent has been published
store_listing = None
profile = None
if library_agent.AgentGraph:
store_listing = await prisma.models.StoreListing.prisma().find_first(
where={
"agentGraphId": library_agent.AgentGraph.id,
"isDeleted": False,
"hasApprovedVersion": True,
},
include={
"ActiveVersion": True,
},
)
if (
store_listing
and store_listing.ActiveVersion
and store_listing.owningUserId
):
# Fetch Profile separately since User doesn't have a direct Profile relation
profile = await prisma.models.Profile.prisma().find_first(
where={"userId": store_listing.owningUserId}
)
return library_model.LibraryAgent.from_db(
library_agent,
sub_graphs=(
@@ -294,8 +269,6 @@ async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent
if library_agent.AgentGraph
else None
),
store_listing=store_listing,
profile=profile,
)
except prisma.errors.PrismaError as e:
@@ -401,24 +374,6 @@ async def add_generated_agent_image(
)
def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings:
"""
Initialize GraphSettings based on graph content.
Args:
graph: The graph to analyze
Returns:
GraphSettings with appropriate human_in_the_loop_safe_mode value
"""
if graph.has_human_in_the_loop:
# Graph has HITL blocks - set safe mode to True by default
return GraphSettings(human_in_the_loop_safe_mode=True)
else:
# Graph has no HITL blocks - keep None
return GraphSettings(human_in_the_loop_safe_mode=None)
async def create_library_agent(
graph: graph_db.GraphModel,
user_id: str,
@@ -441,7 +396,8 @@ async def create_library_agent(
DatabaseError: If there's an error during creation or if image generation fails.
"""
logger.info(
f"Creating library agent for graph #{graph.id} v{graph.version}; user:<redacted>"
f"Creating library agent for graph #{graph.id} v{graph.version}; "
f"user #{user_id}"
)
graph_entries = (
[graph, *graph.sub_graphs] if create_library_agents_for_sub_graphs else [graph]
@@ -464,9 +420,6 @@ async def create_library_agent(
}
}
},
settings=SafeJson(
_initialize_graph_settings(graph_entry).model_dump()
),
),
include=library_agent_include(
user_id, include_nodes=False, include_executions=False
@@ -487,7 +440,7 @@ async def update_agent_version_in_library(
user_id: str,
agent_graph_id: str,
agent_graph_version: int,
) -> library_model.LibraryAgent:
) -> None:
"""
Updates the agent version in the library if useGraphIsActiveVersion is True.
@@ -511,7 +464,7 @@ async def update_agent_version_in_library(
"useGraphIsActiveVersion": True,
},
)
lib = await prisma.models.LibraryAgent.prisma().update(
await prisma.models.LibraryAgent.prisma().update(
where={"id": library_agent.id},
data={
"AgentGraph": {
@@ -523,12 +476,7 @@ async def update_agent_version_in_library(
},
},
},
include={"AgentGraph": True},
)
if lib is None:
raise NotFoundError(f"Library agent {library_agent.id} not found")
return library_model.LibraryAgent.from_db(lib)
except prisma.errors.PrismaError as e:
logger.error(f"Database error updating agent version in library: {e}")
raise DatabaseError("Failed to update agent version in library") from e
@@ -541,7 +489,6 @@ async def update_library_agent(
is_favorite: Optional[bool] = None,
is_archived: Optional[bool] = None,
is_deleted: Optional[Literal[False]] = None,
settings: Optional[GraphSettings] = None,
) -> library_model.LibraryAgent:
"""
Updates the specified LibraryAgent record.
@@ -552,7 +499,6 @@ async def update_library_agent(
auto_update_version: Whether the agent should auto-update to active version.
is_favorite: Whether this agent is marked as a favorite.
is_archived: Whether this agent is archived.
settings: User-specific settings for this library agent.
Returns:
The updated LibraryAgent.
@@ -564,7 +510,7 @@ async def update_library_agent(
logger.debug(
f"Updating library agent {library_agent_id} for user {user_id} with "
f"auto_update_version={auto_update_version}, is_favorite={is_favorite}, "
f"is_archived={is_archived}, settings={settings}"
f"is_archived={is_archived}"
)
update_fields: prisma.types.LibraryAgentUpdateManyMutationInput = {}
if auto_update_version is not None:
@@ -579,8 +525,6 @@ async def update_library_agent(
"Use delete_library_agent() to (soft-)delete library agents"
)
update_fields["isDeleted"] = is_deleted
if settings is not None:
update_fields["settings"] = SafeJson(settings.model_dump())
if not update_fields:
raise ValueError("No values were passed to update")
@@ -601,33 +545,6 @@ async def update_library_agent(
raise DatabaseError("Failed to update library agent") from e
async def update_library_agent_settings(
user_id: str,
agent_id: str,
settings: GraphSettings,
) -> library_model.LibraryAgent:
"""
Updates the settings for a specific LibraryAgent.
Args:
user_id: The owner of the LibraryAgent.
agent_id: The ID of the LibraryAgent to update.
settings: New GraphSettings to apply.
Returns:
The updated LibraryAgent.
Raises:
NotFoundError: If the specified LibraryAgent does not exist.
DatabaseError: If there's an error in the update operation.
"""
return await update_library_agent(
library_agent_id=agent_id,
user_id=user_id,
settings=settings,
)
async def delete_library_agent(
library_agent_id: str, user_id: str, soft_delete: bool = True
) -> None:
@@ -764,18 +681,6 @@ async def add_store_agent_to_library(
graph = store_listing_version.AgentGraph
# Convert to GraphModel to check for HITL blocks
graph_model = await graph_db.get_graph(
graph_id=graph.id,
version=graph.version,
user_id=user_id,
include_subgraphs=False,
)
if not graph_model:
raise store_exceptions.AgentNotFoundError(
f"Graph #{graph.id} v{graph.version} not found or accessible"
)
# Check if user already has this agent
existing_library_agent = await prisma.models.LibraryAgent.prisma().find_unique(
where={
@@ -810,9 +715,6 @@ async def add_store_agent_to_library(
}
},
"isCreatedByUser": False,
"settings": SafeJson(
_initialize_graph_settings(graph_model).model_dump()
),
},
include=library_agent_include(
user_id, include_nodes=False, include_executions=False

View File

@@ -32,7 +32,6 @@ async def test_get_library_agents(mocker):
id="ua1",
userId="test-user",
agentGraphId="agent2",
settings="{}", # type: ignore
agentGraphVersion=1,
isCreatedByUser=False,
isDeleted=False,
@@ -124,7 +123,6 @@ async def test_add_agent_to_library(mocker):
id="ua1",
userId="test-user",
agentGraphId=mock_store_listing_data.agentGraphId,
settings="{}", # type: ignore
agentGraphVersion=1,
isCreatedByUser=False,
isDeleted=False,
@@ -150,14 +148,6 @@ async def test_add_agent_to_library(mocker):
return_value=mock_library_agent_data
)
# Mock graph_db.get_graph function that's called to check for HITL blocks
mock_graph_db = mocker.patch("backend.server.v2.library.db.graph_db")
mock_graph_model = mocker.Mock()
mock_graph_model.nodes = (
[]
) # Empty list so _has_human_in_the_loop_blocks returns False
mock_graph_db.get_graph = mocker.AsyncMock(return_value=mock_graph_model)
# Mock the model conversion
mock_from_db = mocker.patch("backend.server.v2.library.model.LibraryAgent.from_db")
mock_from_db.return_value = mocker.Mock()
@@ -179,29 +169,17 @@ async def test_add_agent_to_library(mocker):
},
include={"AgentGraph": True},
)
# Check that create was called with the expected data including settings
create_call_args = mock_library_agent.return_value.create.call_args
assert create_call_args is not None
# Verify the main structure
expected_data = {
"User": {"connect": {"id": "test-user"}},
"AgentGraph": {"connect": {"graphVersionId": {"id": "agent1", "version": 1}}},
"isCreatedByUser": False,
}
actual_data = create_call_args[1]["data"]
# Check that all expected fields are present
for key, value in expected_data.items():
assert actual_data[key] == value
# Check that settings field is present and is a SafeJson object
assert "settings" in actual_data
assert hasattr(actual_data["settings"], "__class__") # Should be a SafeJson object
# Check include parameter
assert create_call_args[1]["include"] == library_agent_include(
"test-user", include_nodes=False, include_executions=False
mock_library_agent.return_value.create.assert_called_once_with(
data={
"User": {"connect": {"id": "test-user"}},
"AgentGraph": {
"connect": {"graphVersionId": {"id": "agent1", "version": 1}}
},
"isCreatedByUser": False,
},
include=library_agent_include(
"test-user", include_nodes=False, include_executions=False
),
)

View File

@@ -6,8 +6,8 @@ import prisma.enums
import prisma.models
import pydantic
from backend.data.block import BlockInput
from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo
import backend.data.block as block_model
import backend.data.graph as graph_model
from backend.data.model import CredentialsMetaInput, is_credentials_field_name
from backend.util.models import Pagination
@@ -22,23 +22,6 @@ class LibraryAgentStatus(str, Enum):
ERROR = "ERROR" # Agent is in an error state
class MarketplaceListingCreator(pydantic.BaseModel):
"""Creator information for a marketplace listing."""
name: str
id: str
slug: str
class MarketplaceListing(pydantic.BaseModel):
"""Marketplace listing information for a library agent."""
id: str
name: str
slug: str
creator: MarketplaceListingCreator
class LibraryAgent(pydantic.BaseModel):
"""
Represents an agent in the library, including metadata for display and
@@ -56,7 +39,6 @@ class LibraryAgent(pydantic.BaseModel):
status: LibraryAgentStatus
created_at: datetime.datetime
updated_at: datetime.datetime
name: str
@@ -72,7 +54,7 @@ class LibraryAgent(pydantic.BaseModel):
has_external_trigger: bool = pydantic.Field(
description="Whether the agent has an external trigger (e.g. webhook) node"
)
trigger_setup_info: Optional[GraphTriggerInfo] = None
trigger_setup_info: Optional[graph_model.GraphTriggerInfo] = None
# Indicates whether there's a new output (based on recent runs)
new_output: bool
@@ -89,18 +71,10 @@ class LibraryAgent(pydantic.BaseModel):
# Recommended schedule cron (from marketplace agents)
recommended_schedule_cron: str | None = None
# User-specific settings for this library agent
settings: GraphSettings = pydantic.Field(default_factory=GraphSettings)
# Marketplace listing information if the agent has been published
marketplace_listing: Optional["MarketplaceListing"] = None
@staticmethod
def from_db(
agent: prisma.models.LibraryAgent,
sub_graphs: Optional[list[prisma.models.AgentGraph]] = None,
store_listing: Optional[prisma.models.StoreListing] = None,
profile: Optional[prisma.models.Profile] = None,
) -> "LibraryAgent":
"""
Factory method that constructs a LibraryAgent from a Prisma LibraryAgent
@@ -109,9 +83,7 @@ class LibraryAgent(pydantic.BaseModel):
if not agent.AgentGraph:
raise ValueError("Associated Agent record is required.")
graph = GraphModel.from_db(agent.AgentGraph, sub_graphs=sub_graphs)
created_at = agent.createdAt
graph = graph_model.GraphModel.from_db(agent.AgentGraph, sub_graphs=sub_graphs)
agent_updated_at = agent.AgentGraph.updatedAt
lib_agent_updated_at = agent.updatedAt
@@ -144,21 +116,6 @@ class LibraryAgent(pydantic.BaseModel):
# Hard-coded to True until a method to check is implemented
is_latest_version = True
# Build marketplace_listing if available
marketplace_listing_data = None
if store_listing and store_listing.ActiveVersion and profile:
creator_data = MarketplaceListingCreator(
name=profile.name,
id=profile.id,
slug=profile.username,
)
marketplace_listing_data = MarketplaceListing(
id=store_listing.id,
name=store_listing.ActiveVersion.name,
slug=store_listing.slug,
creator=creator_data,
)
return LibraryAgent(
id=agent.id,
graph_id=agent.agentGraphId,
@@ -167,7 +124,6 @@ class LibraryAgent(pydantic.BaseModel):
creator_name=creator_name,
creator_image_url=creator_image_url,
status=status,
created_at=created_at,
updated_at=updated_at,
name=graph.name,
description=graph.description,
@@ -184,8 +140,6 @@ class LibraryAgent(pydantic.BaseModel):
is_latest_version=is_latest_version,
is_favorite=agent.isFavorite,
recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron,
settings=GraphSettings.model_validate(agent.settings),
marketplace_listing=marketplace_listing_data,
)
@@ -253,7 +207,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel):
graph_id: str
graph_version: int
inputs: BlockInput
inputs: block_model.BlockInput
credentials: dict[str, CredentialsMetaInput]
name: str
@@ -282,7 +236,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel):
Request model used when updating a preset for a library agent.
"""
inputs: Optional[BlockInput] = None
inputs: Optional[block_model.BlockInput] = None
credentials: Optional[dict[str, CredentialsMetaInput]] = None
name: Optional[str] = None
@@ -325,7 +279,7 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):
"Webhook must be included in AgentPreset query when webhookId is set"
)
input_data: BlockInput = {}
input_data: block_model.BlockInput = {}
input_credentials: dict[str, CredentialsMetaInput] = {}
for preset_input in preset.InputPresets:
@@ -391,6 +345,3 @@ class LibraryAgentUpdateRequest(pydantic.BaseModel):
is_archived: Optional[bool] = pydantic.Field(
default=None, description="Archive the agent"
)
settings: Optional[GraphSettings] = pydantic.Field(
default=None, description="User-specific settings for this library agent"
)

View File

@@ -22,9 +22,7 @@ router = APIRouter(
@router.get(
"",
summary="List Library Agents",
response_model=library_model.LibraryAgentResponse,
responses={
200: {"description": "List of library agents"},
500: {"description": "Server error", "content": {"application/json": {}}},
},
)
@@ -157,12 +155,7 @@ async def get_library_agent_by_graph_id(
@router.get(
"/marketplace/{store_listing_version_id}",
summary="Get Agent By Store ID",
tags=["store", "library"],
response_model=library_model.LibraryAgent | None,
responses={
200: {"description": "Library agent found"},
404: {"description": "Agent not found"},
},
tags=["store, library"],
)
async def get_library_agent_by_store_listing_version_id(
store_listing_version_id: str,
@@ -276,7 +269,6 @@ async def update_library_agent(
auto_update_version=payload.auto_update_version,
is_favorite=payload.is_favorite,
is_archived=payload.is_archived,
settings=payload.settings,
)
except NotFoundError as e:
raise HTTPException(

View File

@@ -55,7 +55,6 @@ async def test_get_library_agents_success(
can_access_graph=True,
is_latest_version=True,
is_favorite=False,
created_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
library_model.LibraryAgent(
@@ -77,7 +76,6 @@ async def test_get_library_agents_success(
can_access_graph=False,
is_latest_version=True,
is_favorite=False,
created_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
],
@@ -151,7 +149,6 @@ async def test_get_favorite_library_agents_success(
can_access_graph=True,
is_latest_version=True,
is_favorite=True,
created_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
],
@@ -217,7 +214,6 @@ def test_add_agent_to_library_success(
can_access_graph=True,
is_latest_version=True,
is_favorite=False,
created_at=FIXED_NOW,
updated_at=FIXED_NOW,
)

View File

@@ -1,168 +0,0 @@
"""
Script to backfill embeddings for existing store listing versions.
This script should be run after the migration to add the embedding column
to populate embeddings for all existing store listing versions.
Usage:
poetry run python -m backend.server.v2.store.backfill_embeddings
poetry run python -m backend.server.v2.store.backfill_embeddings --dry-run
poetry run python -m backend.server.v2.store.backfill_embeddings --batch-size 25
"""
import argparse
import asyncio
import logging
import sys
from backend.data.db import connect, disconnect, query_raw_with_schema
from backend.integrations.embeddings import EmbeddingService, create_search_text
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
# Default batch size for processing
DEFAULT_BATCH_SIZE = 50
# Delay between batches to avoid rate limits (seconds)
BATCH_DELAY_SECONDS = 1.0
async def backfill_embeddings(
dry_run: bool = False,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> tuple[int, int]:
"""
Backfill embeddings for all store listing versions without embeddings.
Args:
dry_run: If True, don't make any changes, just report what would be done.
batch_size: Number of versions to process in each batch.
Returns:
Tuple of (processed_count, error_count)
"""
await connect()
try:
embedding_service = EmbeddingService()
# Get all versions without embeddings
versions = await query_raw_with_schema(
"""
SELECT id, name, "subHeading", description
FROM {schema_prefix}"StoreListingVersion"
WHERE embedding IS NULL
ORDER BY "createdAt" DESC
"""
)
total = len(versions)
logger.info(f"Found {total} versions without embeddings")
if dry_run:
logger.info("Dry run mode - no changes will be made")
return (0, 0)
if total == 0:
logger.info("No versions need embeddings")
return (0, 0)
processed = 0
errors = 0
for i in range(0, total, batch_size):
batch = versions[i : i + batch_size]
batch_num = (i // batch_size) + 1
total_batches = (total + batch_size - 1) // batch_size
logger.info(f"Processing batch {batch_num}/{total_batches}")
for version in batch:
version_id = version["id"]
try:
search_text = create_search_text(
version["name"] or "",
version["subHeading"] or "",
version["description"] or "",
)
if not search_text:
logger.warning(f"Skipping {version_id} - no searchable text")
continue
embedding = await embedding_service.generate_embedding(search_text)
embedding_str = "[" + ",".join(map(str, embedding)) + "]"
await query_raw_with_schema(
"""
UPDATE {schema_prefix}"StoreListingVersion"
SET embedding = $1::vector
WHERE id = $2
""",
embedding_str,
version_id,
)
processed += 1
except Exception as e:
logger.error(f"Error processing {version_id}: {e}")
errors += 1
logger.info(f"Progress: {processed}/{total} processed, {errors} errors")
# Rate limit: wait between batches to avoid hitting API limits
if i + batch_size < total:
await asyncio.sleep(BATCH_DELAY_SECONDS)
logger.info(f"Backfill complete: {processed} processed, {errors} errors")
return (processed, errors)
finally:
await disconnect()
def main():
parser = argparse.ArgumentParser(
description="Backfill embeddings for store listing versions"
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Don't make any changes, just report what would be done",
)
parser.add_argument(
"--batch-size",
type=int,
default=DEFAULT_BATCH_SIZE,
help=f"Number of versions to process in each batch (default: {DEFAULT_BATCH_SIZE})",
)
args = parser.parse_args()
try:
processed, errors = asyncio.run(
backfill_embeddings(dry_run=args.dry_run, batch_size=args.batch_size)
)
if errors > 0:
logger.warning(f"Completed with {errors} errors")
sys.exit(1)
else:
logger.info("Completed successfully")
sys.exit(0)
except KeyboardInterrupt:
logger.info("Interrupted by user")
sys.exit(130)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -12,7 +12,7 @@ import prisma.types
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
from backend.data.db import query_raw_with_schema, transaction
from backend.data.db import transaction
from backend.data.graph import (
GraphMeta,
GraphModel,
@@ -26,7 +26,6 @@ from backend.data.notifications import (
AgentRejectionData,
NotificationEventModel,
)
from backend.integrations.embeddings import create_search_text, get_embedding_service
from backend.notifications.notifications import queue_notification_async
from backend.util.exceptions import DatabaseError
from backend.util.settings import Settings
@@ -57,40 +56,31 @@ async def get_store_agents(
)
try:
# If search_query is provided, use vector similarity search
# If search_query is provided, use full-text search
if search_query:
offset = (page - 1) * page_size
# Generate embedding for search query
embedding_service = await get_embedding_service()
query_embedding = await embedding_service.generate_embedding(search_query)
# Convert embedding to PostgreSQL array format
embedding_str = "[" + ",".join(map(str, query_embedding)) + "]"
# Whitelist allowed order_by columns
# For vector search, we use similarity instead of rank
ALLOWED_ORDER_BY = {
"rating": "rating DESC, similarity DESC",
"runs": "runs DESC, similarity DESC",
"name": "agent_name ASC, similarity DESC",
"updated_at": "updated_at DESC, similarity DESC",
"rating": "rating DESC, rank DESC",
"runs": "runs DESC, rank DESC",
"name": "agent_name ASC, rank ASC",
"updated_at": "updated_at DESC, rank DESC",
}
# Validate and get order clause
if sorted_by and sorted_by in ALLOWED_ORDER_BY:
order_by_clause = ALLOWED_ORDER_BY[sorted_by]
else:
# Default: order by vector similarity (most similar first)
order_by_clause = "similarity DESC, updated_at DESC"
order_by_clause = "updated_at DESC, rank DESC"
# Build WHERE conditions and parameters list
where_parts: list[str] = []
params: list[typing.Any] = [embedding_str] # $1 - query embedding
params: list[typing.Any] = [search_query] # $1 - search term
param_index = 2 # Start at $2 for next parameter
# Always filter for available agents and agents with embeddings
# Always filter for available agents
where_parts.append("is_available = true")
where_parts.append("embedding IS NOT NULL")
if featured:
where_parts.append("featured = true")
@@ -113,9 +103,7 @@ async def get_store_agents(
limit_param = f"${param_index}"
offset_param = f"${param_index + 1}"
# Vector similarity search query using cosine distance
# The <=> operator returns cosine distance (0 = identical, 2 = opposite)
# We convert to similarity: 1 - distance, giving 1 for identical vectors and -1 for opposite
# Execute full-text search query with parameterized values
sql_query = f"""
SELECT
slug,
@@ -131,26 +119,34 @@ async def get_store_agents(
featured,
is_available,
updated_at,
1 - (embedding <=> $1::vector) AS similarity
FROM {{schema_prefix}}"StoreAgent"
ts_rank_cd(search, query) AS rank
FROM "StoreAgent",
plainto_tsquery('english', $1) AS query
WHERE {sql_where_clause}
AND search @@ query
ORDER BY {order_by_clause}
LIMIT {limit_param} OFFSET {offset_param}
"""
# Count query for pagination
# Count query for pagination - only uses search term parameter
count_query = f"""
SELECT COUNT(*) as count
FROM {{schema_prefix}}"StoreAgent"
FROM "StoreAgent",
plainto_tsquery('english', $1) AS query
WHERE {sql_where_clause}
AND search @@ query
"""
# Execute both queries with parameters
agents = await query_raw_with_schema(sql_query, *params)
agents = await prisma.client.get_client().query_raw(
typing.cast(typing.LiteralString, sql_query), *params
)
# For count, use params without pagination (last 2 params)
count_params = params[:-2]
count_result = await query_raw_with_schema(count_query, *count_params)
count_result = await prisma.client.get_client().query_raw(
typing.cast(typing.LiteralString, count_query), *count_params
)
total = count_result[0]["count"] if count_result else 0
total_pages = (total + page_size - 1) // page_size
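
For orientation, a minimal standalone sketch of the plainto_tsquery/ts_rank_cd pattern this hunk restores, assuming an asyncpg connection (the real code goes through Prisma's query_raw) and that the StoreAgent view exposes the search tsvector column; the DSN is a placeholder:

import asyncpg

async def search_store_agents(dsn: str, term: str, limit: int = 20):
    # plainto_tsquery normalizes the raw term, search @@ query does the match,
    # and ts_rank_cd ranks hits by cover density, mirroring the query above.
    sql = """
        SELECT slug, agent_name, ts_rank_cd(search, query) AS rank
        FROM "StoreAgent", plainto_tsquery('english', $1) AS query
        WHERE is_available = true AND search @@ query
        ORDER BY rank DESC
        LIMIT $2
    """
    conn = await asyncpg.connect(dsn)
    try:
        return await conn.fetch(sql, term, limit)
    finally:
        await conn.close()
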
@@ -263,56 +259,6 @@ async def log_search_term(search_query: str):
logger.error(f"Error logging search term: {e}")
async def _generate_and_store_embedding(
store_listing_version_id: str,
name: str,
sub_heading: str,
description: str,
) -> None:
"""
Generate and store embedding for a store listing version.
This creates a vector embedding from the agent's name, sub_heading, and
description, which is used for semantic search.
Args:
store_listing_version_id: The ID of the store listing version.
name: The agent name.
sub_heading: The agent sub-heading/tagline.
description: The agent description.
"""
try:
embedding_service = await get_embedding_service()
search_text = create_search_text(name, sub_heading, description)
if not search_text:
logger.warning(
f"No searchable text for version {store_listing_version_id}, "
"skipping embedding generation"
)
return
embedding = await embedding_service.generate_embedding(search_text)
embedding_str = "[" + ",".join(map(str, embedding)) + "]"
await query_raw_with_schema(
"""
UPDATE {schema_prefix}"StoreListingVersion"
SET embedding = $1::vector
WHERE id = $2
""",
embedding_str,
store_listing_version_id,
)
logger.debug(f"Generated embedding for version {store_listing_version_id}")
except Exception as e:
# Log error but don't fail the whole operation
# Embeddings can be generated later via backfill
logger.error(
f"Failed to generate embedding for {store_listing_version_id}: {e}"
)
async def get_store_agent_details(
username: str, agent_name: str
) -> backend.server.v2.store.model.StoreAgentDetails:
@@ -859,12 +805,6 @@ async def create_store_submission(
else None
)
# Generate embedding for semantic search
if store_listing_version_id:
await _generate_and_store_embedding(
store_listing_version_id, name, sub_heading, description
)
logger.debug(f"Created store listing for agent {agent_id}")
# Return submission details
return backend.server.v2.store.model.StoreSubmission(
@@ -1027,12 +967,6 @@ async def edit_store_submission(
if not updated_version:
raise DatabaseError("Failed to update store listing version")
# Regenerate embedding with updated content
await _generate_and_store_embedding(
store_listing_version_id, name, sub_heading, description
)
return backend.server.v2.store.model.StoreSubmission(
agent_id=current_version.agentGraphId,
agent_version=current_version.agentGraphVersion,
@@ -1163,12 +1097,6 @@ async def create_store_version(
logger.debug(
f"Created new version for listing {store_listing_id} of agent {agent_id}"
)
# Generate embedding for semantic search
await _generate_and_store_embedding(
new_version.id, name, sub_heading, description
)
# Return submission details
return backend.server.v2.store.model.StoreSubmission(
agent_id=agent_id,

View File

@@ -405,237 +405,3 @@ async def test_get_store_agents_search_category_array_injection():
# Verify the query executed without error
# Category should be parameterized, preventing SQL injection
assert isinstance(result.agents, list)
# Vector search tests
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_vector_search_mocked(mocker):
"""Test vector search uses embedding service and executes query safely."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.AsyncMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema to return empty results
mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
)
# Call function with search query
result = await db.get_store_agents(search_query="test query")
# Verify embedding service was called
mock_embedding_service.generate_embedding.assert_called_once_with("test query")
# Verify results
assert isinstance(result.agents, list)
assert len(result.agents) == 0
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_vector_search_with_results(mocker):
"""Test vector search returns properly formatted results."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.AsyncMock(return_value=mock_embedding_service),
)
# Mock query results
mock_agents = [
{
"slug": "test-agent",
"agent_name": "Test Agent",
"agent_image": ["image.jpg"],
"creator_username": "creator",
"creator_avatar": "avatar.jpg",
"sub_heading": "Test heading",
"description": "Test description",
"runs": 10,
"rating": 4.5,
"categories": ["test"],
"featured": False,
"is_available": True,
"updated_at": datetime.now(),
"similarity": 0.95,
}
]
mock_count = [{"count": 1}]
mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[mock_agents, mock_count]),
)
# Call function with search query
result = await db.get_store_agents(search_query="test query")
# Verify results
assert len(result.agents) == 1
assert result.agents[0].slug == "test-agent"
assert result.agents[0].agent_name == "Test Agent"
assert result.pagination.total_items == 1
@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_vector_search_with_filters(mocker):
"""Test vector search works correctly with additional filters."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.AsyncMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
)
# Call function with search query and filters
await db.get_store_agents(
search_query="test query",
featured=True,
creators=["creator1", "creator2"],
category="AI",
sorted_by="rating",
)
# Verify query was called with parameterized values
# First call is the main query, second is count
assert mock_query.call_count == 2
# Check that the SQL query includes proper parameterization
first_call_args = mock_query.call_args_list[0]
sql_query = first_call_args[0][0]
# Verify key elements of the query
assert "embedding <=> $1::vector" in sql_query
assert "featured = true" in sql_query
assert "creator_username = ANY($" in sql_query
assert "= ANY(categories)" in sql_query
@pytest.mark.asyncio(loop_scope="session")
async def test_generate_and_store_embedding_success(mocker):
"""Test that embedding generation and storage works correctly."""
from backend.integrations.embeddings import EMBEDDING_DIMENSIONS
# Mock embedding service
mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
return_value=mock_embedding
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.AsyncMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(return_value=[]),
)
# Call the internal function
await db._generate_and_store_embedding(
store_listing_version_id="version-123",
name="Test Agent",
sub_heading="A test agent",
description="Does testing",
)
# Verify embedding service was called with combined text
mock_embedding_service.generate_embedding.assert_called_once_with(
"Test Agent A test agent Does testing"
)
# Verify database update was called
mock_query.assert_called_once()
call_args = mock_query.call_args
assert "UPDATE" in call_args[0][0]
assert "embedding = $1::vector" in call_args[0][0]
assert call_args[0][2] == "version-123"
@pytest.mark.asyncio(loop_scope="session")
async def test_generate_and_store_embedding_empty_text(mocker):
"""Test that embedding is not generated for empty text."""
# Mock embedding service
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock()
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.AsyncMock(return_value=mock_embedding_service),
)
# Mock query_raw_with_schema
mock_query = mocker.patch(
"backend.server.v2.store.db.query_raw_with_schema",
mocker.AsyncMock(return_value=[]),
)
# Call with empty fields
await db._generate_and_store_embedding(
store_listing_version_id="version-123",
name="",
sub_heading="",
description="",
)
# Verify embedding service was NOT called
mock_embedding_service.generate_embedding.assert_not_called()
# Verify database was NOT updated
mock_query.assert_not_called()
@pytest.mark.asyncio(loop_scope="session")
async def test_generate_and_store_embedding_handles_error(mocker):
"""Test that embedding generation errors don't crash the operation."""
# Mock embedding service to raise an error
mock_embedding_service = mocker.MagicMock()
mock_embedding_service.generate_embedding = mocker.AsyncMock(
side_effect=Exception("API error")
)
mocker.patch(
"backend.server.v2.store.db.get_embedding_service",
mocker.AsyncMock(return_value=mock_embedding_service),
)
# Call should not raise - errors are logged but not propagated
await db._generate_and_store_embedding(
store_listing_version_id="version-123",
name="Test Agent",
sub_heading="A test agent",
description="Does testing",
)
# Verify embedding service was called (and failed)
mock_embedding_service.generate_embedding.assert_called_once()

View File

@@ -27,7 +27,6 @@ from backend.util.settings import Settings
P = ParamSpec("P")
R = TypeVar("R")
R_co = TypeVar("R_co", covariant=True)
T = TypeVar("T")
logger = logging.getLogger(__name__)
settings = Settings()
@@ -144,7 +143,7 @@ def cached(
ttl_seconds: int,
shared_cache: bool = False,
refresh_ttl_on_get: bool = False,
) -> Callable[[Callable[P, R]], CachedFunction[P, R]]:
) -> Callable[[Callable], CachedFunction]:
"""
Thundering herd safe cache decorator for both sync and async functions.
@@ -170,7 +169,7 @@ def cached(
return {"result": param}
"""
def decorator(target_func: Callable[P, R]) -> CachedFunction[P, R]:
def decorator(target_func):
cache_storage: dict[tuple, CachedValue] = {}
_event_loop_locks: dict[Any, asyncio.Lock] = {}
@@ -387,7 +386,7 @@ def cached(
setattr(wrapper, "cache_info", cache_info)
setattr(wrapper, "cache_delete", cache_delete)
return cast(CachedFunction[P, R], wrapper)
return cast(CachedFunction, wrapper)
return decorator
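
Usage-wise, the decorator's public surface shown above (the ttl_seconds argument plus the cache_info helper attached via setattr) can be exercised like this; a sketch, with the import path assumed rather than taken from this diff:

import asyncio

from backend.util.cache import cached  # assumed module path

@cached(ttl_seconds=300)
async def fetch_profile(user_id: str) -> dict:
    # Expensive lookup; concurrent callers with the same key share one in-flight call.
    await asyncio.sleep(0.1)
    return {"id": user_id}

async def main() -> None:
    await fetch_profile("u1")
    await fetch_profile("u1")          # second call served from cache within the TTL
    print(fetch_profile.cache_info())  # stats helper attached by the decorator

asyncio.run(main())
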

View File

@@ -21,20 +21,10 @@ class BlockOutputError(BlockError, ValueError):
class BlockExecutionError(BlockError, ValueError):
"""The block failed to execute at runtime, resulting in a handled error"""
def __init__(self, message: str | None, block_name: str, block_id: str) -> None:
if message is None:
message = "Output error was None"
super().__init__(message, block_name, block_id)
class BlockUnknownError(BlockError):
"""Critical unknown error with block handling"""
def __init__(self, message: str | None, block_name: str, block_id: str) -> None:
if not message:
message = "Unknown error occurred"
super().__init__(message, block_name, block_id)
class MissingConfigError(Exception):
"""The attempted operation requires configuration which is not available"""

View File

@@ -1,125 +0,0 @@
from backend.util.exceptions import (
BlockError,
BlockExecutionError,
BlockInputError,
BlockOutputError,
BlockUnknownError,
)
class TestBlockError:
"""Tests for BlockError and its subclasses."""
def test_block_error_message_format(self):
"""Test that BlockError formats the message correctly."""
error = BlockError(
message="Test error", block_name="TestBlock", block_id="test-123"
)
assert (
str(error)
== "raised by TestBlock with message: Test error. block_id: test-123"
)
def test_block_input_error_inherits_format(self):
"""Test that BlockInputError uses parent's message format."""
error = BlockInputError(
message="Invalid input", block_name="TestBlock", block_id="test-123"
)
assert "raised by TestBlock with message: Invalid input" in str(error)
def test_block_output_error_inherits_format(self):
"""Test that BlockOutputError uses parent's message format."""
error = BlockOutputError(
message="Invalid output", block_name="TestBlock", block_id="test-123"
)
assert "raised by TestBlock with message: Invalid output" in str(error)
class TestBlockExecutionErrorNoneHandling:
"""Tests for BlockExecutionError handling of None messages."""
def test_execution_error_with_none_message(self):
"""Test that None message is replaced with descriptive text."""
error = BlockExecutionError(
message=None, block_name="TestBlock", block_id="test-123"
)
assert "Output error was None" in str(error)
assert "raised by TestBlock with message: Output error was None" in str(error)
def test_execution_error_with_valid_message(self):
"""Test that valid messages are preserved."""
error = BlockExecutionError(
message="Actual error", block_name="TestBlock", block_id="test-123"
)
assert "Actual error" in str(error)
assert "Output error was None" not in str(error)
def test_execution_error_with_empty_string(self):
"""Test that empty string message is NOT replaced (only None is)."""
error = BlockExecutionError(
message="", block_name="TestBlock", block_id="test-123"
)
# Empty string is falsy but not None, so it's preserved
assert "raised by TestBlock with message: . block_id:" in str(error)
class TestBlockUnknownErrorNoneHandling:
"""Tests for BlockUnknownError handling of None/empty messages."""
def test_unknown_error_with_none_message(self):
"""Test that None message is replaced with descriptive text."""
error = BlockUnknownError(
message=None, block_name="TestBlock", block_id="test-123"
)
assert "Unknown error occurred" in str(error)
def test_unknown_error_with_empty_string(self):
"""Test that empty string is replaced with descriptive text."""
error = BlockUnknownError(
message="", block_name="TestBlock", block_id="test-123"
)
assert "Unknown error occurred" in str(error)
def test_unknown_error_with_valid_message(self):
"""Test that valid messages are preserved."""
error = BlockUnknownError(
message="Something went wrong", block_name="TestBlock", block_id="test-123"
)
assert "Something went wrong" in str(error)
assert "Unknown error occurred" not in str(error)
class TestBlockErrorInheritance:
"""Tests for proper exception inheritance."""
def test_block_execution_error_is_value_error(self):
"""Test that BlockExecutionError is a ValueError."""
error = BlockExecutionError(
message="test", block_name="TestBlock", block_id="test-123"
)
assert isinstance(error, ValueError)
assert isinstance(error, BlockError)
def test_block_input_error_is_value_error(self):
"""Test that BlockInputError is a ValueError."""
error = BlockInputError(
message="test", block_name="TestBlock", block_id="test-123"
)
assert isinstance(error, ValueError)
assert isinstance(error, BlockError)
def test_block_output_error_is_value_error(self):
"""Test that BlockOutputError is a ValueError."""
error = BlockOutputError(
message="test", block_name="TestBlock", block_id="test-123"
)
assert isinstance(error, ValueError)
assert isinstance(error, BlockError)
def test_block_unknown_error_is_not_value_error(self):
"""Test that BlockUnknownError is NOT a ValueError."""
error = BlockUnknownError(
message="test", block_name="TestBlock", block_id="test-123"
)
assert not isinstance(error, ValueError)
assert isinstance(error, BlockError)

View File

@@ -11,13 +11,7 @@ from urllib.parse import quote, urljoin, urlparse
import aiohttp
import idna
from aiohttp import FormData, abc
from tenacity import (
RetryCallState,
retry,
retry_if_result,
stop_after_attempt,
wait_exponential_jitter,
)
from tenacity import retry, retry_if_result, wait_exponential_jitter
from backend.util.json import loads
@@ -291,20 +285,6 @@ class Response:
return 200 <= self.status < 300
def _return_last_result(retry_state: RetryCallState) -> "Response":
"""
Ensure the final attempt's response is returned when retrying stops.
"""
if retry_state.outcome is None:
raise RuntimeError("Retry state is missing an outcome.")
exception = retry_state.outcome.exception()
if exception is not None:
raise exception
return retry_state.outcome.result()
class Requests:
"""
A wrapper around an aiohttp ClientSession that validates URLs before
@@ -319,7 +299,6 @@ class Requests:
extra_url_validator: Callable[[URL], URL] | None = None,
extra_headers: dict[str, str] | None = None,
retry_max_wait: float = 300.0,
retry_max_attempts: int | None = None,
):
self.trusted_origins = []
for url in trusted_origins or []:
@@ -332,9 +311,6 @@ class Requests:
self.extra_url_validator = extra_url_validator
self.extra_headers = extra_headers
self.retry_max_wait = retry_max_wait
if retry_max_attempts is not None and retry_max_attempts < 1:
raise ValueError("retry_max_attempts must be None or >= 1")
self.retry_max_attempts = retry_max_attempts
async def request(
self,
@@ -349,17 +325,11 @@ class Requests:
max_redirects: int = 10,
**kwargs,
) -> Response:
retry_kwargs: dict[str, Any] = {
"wait": wait_exponential_jitter(max=self.retry_max_wait),
"retry": retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES),
"reraise": True,
}
if self.retry_max_attempts is not None:
retry_kwargs["stop"] = stop_after_attempt(self.retry_max_attempts)
retry_kwargs["retry_error_callback"] = _return_last_result
@retry(**retry_kwargs)
@retry(
wait=wait_exponential_jitter(max=self.retry_max_wait),
retry=retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES),
reraise=True,
)
async def _make_request() -> Response:
return await self._request(
method=method,
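
The removed retry_max_attempts plumbing maps onto stock tenacity features; a sketch of the equivalent bounded-retry behavior, assuming an aiohttp-style response with a .status attribute and a placeholder set of throttle codes:

from tenacity import (
    RetryCallState,
    retry,
    retry_if_result,
    stop_after_attempt,
    wait_exponential_jitter,
)

THROTTLE_RETRY_STATUS_CODES = {429, 503}  # placeholder for the sketch

def _return_last_result(retry_state: RetryCallState):
    # When attempts run out, hand back the final response instead of a RetryError.
    if retry_state.outcome is None:
        raise RuntimeError("Retry state is missing an outcome.")
    if (exc := retry_state.outcome.exception()) is not None:
        raise exc
    return retry_state.outcome.result()

@retry(
    wait=wait_exponential_jitter(max=300.0),
    retry=retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES),
    stop=stop_after_attempt(3),
    retry_error_callback=_return_last_result,
)
async def get_with_retry(session, url: str):
    resp = await session.get(url)
    await resp.read()  # consume the body so the connection can be reused
    return resp
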

View File

@@ -28,7 +28,6 @@ from typing import (
import httpx
import uvicorn
from fastapi import FastAPI, Request, responses
from prisma.errors import DataError
from pydantic import BaseModel, TypeAdapter, create_model
import backend.util.exceptions as exceptions
@@ -194,7 +193,6 @@ EXCEPTION_MAPPING = {
e.__name__: e
for e in [
ValueError,
DataError,
RuntimeError,
TimeoutError,
ConnectionError,
@@ -413,9 +411,6 @@ class AppService(BaseAppService, ABC):
self.fastapi_app.add_exception_handler(
ValueError, self._handle_internal_http_error(400)
)
self.fastapi_app.add_exception_handler(
DataError, self._handle_internal_http_error(400)
)
self.fastapi_app.add_exception_handler(
Exception, self._handle_internal_http_error(500)
)
@@ -477,7 +472,6 @@ def get_service_client(
exclude_exceptions=(
# Don't retry these specific exceptions that won't be fixed by retrying
ValueError, # Invalid input/parameters
DataError, # Prisma data integrity errors (foreign key, unique constraints)
KeyError, # Missing required data
TypeError, # Wrong data types
AttributeError, # Missing attributes

View File

@@ -439,12 +439,6 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
'"regex:" to match via regular expression.',
)
external_oauth_callback_origins: List[str] = Field(
default=["http://localhost:3000"],
description="Allowed callback URL origins for external OAuth flows. "
"External apps (like Autopilot) must have their callback URLs start with one of these origins.",
)
@field_validator("backend_cors_allow_origins")
@classmethod
def validate_cors_allow_origins(cls, v: List[str]) -> List[str]:
@@ -577,12 +571,6 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
open_router_api_key: str = Field(default="", description="Open Router API Key")
llama_api_key: str = Field(default="", description="Llama API Key")
v0_api_key: str = Field(default="", description="v0 by Vercel API key")
webshare_proxy_username: str = Field(
default="", description="Webshare Proxy Username"
)
webshare_proxy_password: str = Field(
default="", description="Webshare Proxy Password"
)
reddit_client_id: str = Field(default="", description="Reddit client ID")
reddit_client_secret: str = Field(default="", description="Reddit client secret")

View File

@@ -9,9 +9,9 @@ from autogpt_libs.auth import get_user_id
from backend.data import db
from backend.data.block import Block, BlockSchema, initialize_blocks
from backend.data.execution import (
ExecutionContext,
ExecutionStatus,
NodeExecutionResult,
UserContext,
get_graph_execution,
)
from backend.data.model import _BaseCredentials
@@ -140,12 +140,9 @@ async def execute_block_test(block: Block):
"graph_exec_id": str(uuid.uuid4()),
"node_exec_id": str(uuid.uuid4()),
"user_id": str(uuid.uuid4()),
"graph_version": 1, # Default version for tests
"execution_context": ExecutionContext(),
"user_context": UserContext(timezone="UTC"), # Default for tests
}
input_model = cast(type[BlockSchema], block.input_schema)
# Handle regular credentials fields
credentials_input_fields = input_model.get_credentials_fields()
if len(credentials_input_fields) == 1 and isinstance(
block.test_credentials, _BaseCredentials
@@ -160,18 +157,6 @@ async def execute_block_test(block: Block):
if field_name in block.test_credentials:
extra_exec_kwargs[field_name] = block.test_credentials[field_name]
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
auto_creds_fields = input_model.get_auto_credentials_fields()
if auto_creds_fields and block.test_credentials:
if isinstance(block.test_credentials, _BaseCredentials):
# Single credentials object - use for all auto_credentials kwargs
for kwarg_name in auto_creds_fields.keys():
extra_exec_kwargs[kwarg_name] = block.test_credentials
elif isinstance(block.test_credentials, dict):
for kwarg_name in auto_creds_fields.keys():
if kwarg_name in block.test_credentials:
extra_exec_kwargs[kwarg_name] = block.test_credentials[kwarg_name]
for input_data in block.test_input:
log.info(f"{prefix} in: {input_data}")

View File

@@ -10,8 +10,6 @@ from zoneinfo import ZoneInfo
from croniter import croniter
from backend.data.model import USER_TIMEZONE_NOT_SET
logger = logging.getLogger(__name__)
@@ -140,7 +138,7 @@ def get_user_timezone_or_utc(user_timezone: Optional[str]) -> str:
Returns:
Valid timezone string (user's preference or UTC fallback)
"""
if not user_timezone or user_timezone == USER_TIMEZONE_NOT_SET:
if not user_timezone or user_timezone == "not-set":
return "UTC"
if validate_timezone(user_timezone):
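
A self-contained sketch of the fallback logic above, using zoneinfo for the validation step and the "not-set" sentinel this hunk reverts to; validate_timezone itself is not shown in the diff, so the try/except stands in for it:

from typing import Optional
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError

def user_timezone_or_utc(user_timezone: Optional[str]) -> str:
    # Fall back to UTC for missing, sentinel, or unknown IANA timezone names.
    if not user_timezone or user_timezone == "not-set":
        return "UTC"
    try:
        ZoneInfo(user_timezone)  # raises if the name is not a known zone
        return user_timezone
    except (ZoneInfoNotFoundError, ValueError):
        return "UTC"
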

View File

@@ -1,44 +0,0 @@
-- CreateEnum
CREATE TYPE "ReviewStatus" AS ENUM ('WAITING', 'APPROVED', 'REJECTED');
-- AlterEnum
ALTER TYPE "AgentExecutionStatus" ADD VALUE 'REVIEW';
-- CreateTable
CREATE TABLE "PendingHumanReview" (
"nodeExecId" TEXT NOT NULL,
"userId" TEXT NOT NULL,
"graphExecId" TEXT NOT NULL,
"graphId" TEXT NOT NULL,
"graphVersion" INTEGER NOT NULL,
"payload" JSONB NOT NULL,
"instructions" TEXT,
"editable" BOOLEAN NOT NULL DEFAULT true,
"status" "ReviewStatus" NOT NULL DEFAULT 'WAITING',
"reviewMessage" TEXT,
"wasEdited" BOOLEAN,
"processed" BOOLEAN NOT NULL DEFAULT false,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3),
"reviewedAt" TIMESTAMP(3),
CONSTRAINT "PendingHumanReview_pkey" PRIMARY KEY ("nodeExecId")
);
-- CreateIndex
CREATE INDEX "PendingHumanReview_userId_status_idx" ON "PendingHumanReview"("userId", "status");
-- CreateIndex
CREATE INDEX "PendingHumanReview_graphExecId_status_idx" ON "PendingHumanReview"("graphExecId", "status");
-- CreateIndex
CREATE UNIQUE INDEX "PendingHumanReview_nodeExecId_key" ON "PendingHumanReview"("nodeExecId");
-- AddForeignKey
ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_nodeExecId_fkey" FOREIGN KEY ("nodeExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_graphExecId_fkey" FOREIGN KEY ("graphExecId") REFERENCES "AgentGraphExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -1,2 +0,0 @@
-- AlterEnum
ALTER TYPE "APIKeyPermission" ADD VALUE 'READ_STORE';

View File

@@ -1,2 +0,0 @@
-- AlterEnum
ALTER TYPE "APIKeyPermission" ADD VALUE 'USE_TOOLS';

View File

@@ -1,4 +0,0 @@
-- AlterEnum
ALTER TYPE "APIKeyPermission" ADD VALUE 'MANAGE_INTEGRATIONS';
ALTER TYPE "APIKeyPermission" ADD VALUE 'READ_INTEGRATIONS';
ALTER TYPE "APIKeyPermission" ADD VALUE 'DELETE_INTEGRATIONS';

View File

@@ -1,2 +0,0 @@
-- AlterTable
ALTER TABLE "LibraryAgent" ADD COLUMN "settings" JSONB NOT NULL DEFAULT '{}';

View File

@@ -1,92 +0,0 @@
-- Migration: Replace full-text search with pgvector-based vector search
-- This migration:
-- 1. Enables the pgvector extension
-- 2. Drops the StoreAgent view (depends on search column)
-- 3. Removes the full-text search infrastructure (trigger, function, tsvector column)
-- 4. Adds a vector embedding column for semantic search
-- 5. Creates an index for fast vector similarity search
-- 6. Recreates the StoreAgent view with the embedding column
-- Enable pgvector extension
CREATE EXTENSION IF NOT EXISTS vector;
-- First drop the view that depends on the search column
DROP VIEW IF EXISTS "StoreAgent";
-- Remove full-text search infrastructure
DROP TRIGGER IF EXISTS "update_tsvector" ON "StoreListingVersion";
DROP FUNCTION IF EXISTS update_tsvector_column();
-- Drop the tsvector search column
ALTER TABLE "StoreListingVersion" DROP COLUMN IF EXISTS "search";
-- Add embedding column for vector search (1536 dimensions for text-embedding-3-small)
ALTER TABLE "StoreListingVersion"
ADD COLUMN IF NOT EXISTS "embedding" vector(1536);
-- Create IVFFlat index for fast similarity search
-- Using cosine distance (vector_cosine_ops) which is standard for text embeddings
-- lists = 100 is appropriate for datasets under 1M rows
CREATE INDEX IF NOT EXISTS idx_store_listing_version_embedding
ON "StoreListingVersion"
USING ivfflat (embedding vector_cosine_ops)
WITH (lists = 100);
-- Recreate StoreAgent view WITHOUT search column, WITH embedding column
CREATE OR REPLACE VIEW "StoreAgent" AS
WITH latest_versions AS (
SELECT
"storeListingId",
MAX(version) AS max_version
FROM "StoreListingVersion"
WHERE "submissionStatus" = 'APPROVED'
GROUP BY "storeListingId"
),
agent_versions AS (
SELECT
"storeListingId",
array_agg(DISTINCT version::text ORDER BY version::text) AS versions
FROM "StoreListingVersion"
WHERE "submissionStatus" = 'APPROVED'
GROUP BY "storeListingId"
)
SELECT
sl.id AS listing_id,
slv.id AS "storeListingVersionId",
slv."createdAt" AS updated_at,
sl.slug,
COALESCE(slv.name, '') AS agent_name,
slv."videoUrl" AS agent_video,
COALESCE(slv."imageUrls", ARRAY[]::text[]) AS agent_image,
slv."isFeatured" AS featured,
p.username AS creator_username,
p."avatarUrl" AS creator_avatar,
slv."subHeading" AS sub_heading,
slv.description,
slv.categories,
slv.embedding,
COALESCE(ar.run_count, 0::bigint) AS runs,
COALESCE(rs.avg_rating, 0.0)::double precision AS rating,
COALESCE(av.versions, ARRAY[slv.version::text]) AS versions,
COALESCE(sl."useForOnboarding", false) AS "useForOnboarding",
slv."isAvailable" AS is_available
FROM "StoreListing" sl
JOIN latest_versions lv
ON sl.id = lv."storeListingId"
JOIN "StoreListingVersion" slv
ON slv."storeListingId" = lv."storeListingId"
AND slv.version = lv.max_version
AND slv."submissionStatus" = 'APPROVED'
JOIN "AgentGraph" a
ON slv."agentGraphId" = a.id
AND slv."agentGraphVersion" = a.version
LEFT JOIN "Profile" p
ON sl."owningUserId" = p."userId"
LEFT JOIN "mv_review_stats" rs
ON sl.id = rs."storeListingId"
LEFT JOIN "mv_agent_run_counts" ar
ON a.id = ar."agentGraphId"
LEFT JOIN agent_versions av
ON sl.id = av."storeListingId"
WHERE sl."isDeleted" = false
AND sl."hasApprovedVersion" = true;
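
For the record of what this removed migration enabled: querying the embedding column via the IVFFlat index looked roughly like the sketch below, mirroring the vector-search code removed earlier in the diff; the DSN and the 1536-float query vector are placeholders:

import asyncpg

async def top_similar_agents(dsn: str, query_embedding: list[float], k: int = 10):
    # pgvector accepts a bracketed float list as its text form;
    # <=> is cosine distance, so 1 - distance is cosine similarity.
    embedding_str = "[" + ",".join(map(str, query_embedding)) + "]"
    sql = """
        SELECT slug, agent_name, 1 - (embedding <=> $1::vector) AS similarity
        FROM "StoreAgent"
        WHERE embedding IS NOT NULL
        ORDER BY embedding <=> $1::vector
        LIMIT $2
    """
    conn = await asyncpg.connect(dsn)
    try:
        return await conn.fetch(sql, embedding_str, k)
    finally:
        await conn.close()
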

Some files were not shown because too many files have changed in this diff.