mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-10 07:38:04 -05:00

Merge branch 'dev' into toran/open-2856-handle-failed-replicate-predictions-with-retries-in-all
4  .github/workflows/platform-frontend-ci.yml  (vendored)
@@ -12,6 +12,10 @@ on:
       - "autogpt_platform/frontend/**"
+  merge_group:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
 
 defaults:
   run:
     shell: bash
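The `group` expression above is dense; as a rough editorial sketch (Python, names hypothetical, not part of the diff), it resolves like this — GitHub's `a && b || c` behaves like Python's `b if a else c`:

    def frontend_concurrency_group(workflow: str, event_name: str, ref: str,
                                   pr_number: int | None, sha: str) -> str:
        # merge-queue runs get their own group, keyed by the queue ref
        if event_name == "merge_group":
            return f"{workflow}-merge-queue-{ref}"
        # format('{0}-{1}', ref, pr_number || sha): PR number when present,
        # otherwise the commit SHA (e.g. on plain push events)
        return f"{workflow}-{ref}-{pr_number or sha}"

So a pull-request run and a merge-queue run never share a group, and since `cancel-in-progress` is only true for `pull_request` events, merge-queue builds are never cancelled by PR pushes.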
4  .github/workflows/platform-fullstack-ci.yml  (vendored)
@@ -12,6 +12,10 @@ on:
       - "autogpt_platform/**"
+  merge_group:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
 
 defaults:
   run:
     shell: bash
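Note the fullstack file uses a slightly different fallback chain than the frontend one: it keys PR runs as `pr-<number>` and only falls back to the bare SHA. A compact sketch of the same kind (hypothetical names):

    def fullstack_concurrency_group(workflow, event_name, ref, head_ref, pr_number, sha):
        if event_name == "merge_group":
            return f"{workflow}-merge-queue-{ref}"
        if head_ref:                      # truthy only on pull_request events
            return f"{workflow}-pr-{pr_number}"
        return f"{workflow}-{sha}"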
@@ -11,7 +11,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v9
+      - uses: actions/stale@v10
         with:
           # operations-per-run: 5000
           stale-issue-message: >
2  .github/workflows/repo-pr-label.yml  (vendored)
@@ -61,6 +61,6 @@ jobs:
     pull-requests: write
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/labeler@v5
+      - uses: actions/labeler@v6
         with:
           sync-labels: true
@@ -1,4 +1,4 @@
-.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend
+.PHONY: start-core stop-core logs-core format lint migrate run-backend run-frontend load-store-agents
 
 # Run just Supabase + Redis + RabbitMQ
 start-core:
@@ -42,7 +42,10 @@ run-frontend:
 test-data:
 	cd backend && poetry run python test/test_data_creator.py
 
+load-store-agents:
+	cd backend && poetry run load-store-agents
+
 help:
 	@echo "Usage: make <target>"
 	@echo "Targets:"
@@ -54,4 +57,5 @@ help:
 	@echo " migrate - Run backend database migrations"
 	@echo " run-backend - Run the backend FastAPI server"
 	@echo " run-frontend - Run the frontend Next.js development server"
 	@echo " test-data - Run the test data creator"
+	@echo " load-store-agents - Load store agents from agents/ folder into test database"
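The `load-store-agents` entry point itself is not shown in this diff. As an editorial sketch only (the function name and table handling are hypothetical), a loader consuming the `StoreAgent_rows.csv` file added below might look like this — `csv.DictReader` handles the multi-line quoted description fields:

    import csv
    from pathlib import Path

    def load_store_agents(csv_path: str = "agents/StoreAgent_rows.csv") -> list[dict]:
        """Parse the store-agent seed CSV into row dicts (illustrative only)."""
        rows = []
        with Path(csv_path).open(newline="", encoding="utf-8") as f:
            for row in csv.DictReader(f):
                # Boolean columns arrive as the strings "true"/"false" in CSV.
                row["featured"] = row["featured"] == "true"
                row["is_available"] = row["is_available"] == "true"
                rows.append(row)
        return rows

    if __name__ == "__main__":
        print(f"loaded {len(load_store_agents())} store agent rows")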
242  autogpt_platform/backend/agents/StoreAgent_rows.csv  (new file)
@@ -0,0 +1,242 @@
listing_id,storeListingVersionId,slug,agent_name,agent_video,agent_image,featured,sub_heading,description,categories,useForOnboarding,is_available
6e60a900-9d7d-490e-9af2-a194827ed632,d85882b8-633f-44ce-a315-c20a8c123d19,flux-ai-image-generator,Flux AI Image Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ca154dd1-140e-454c-91bd-2d8a00de3f08.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/577d995d-bc38-40a9-a23f-1f30f5774bdb.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/415db1b7-115c-43ab-bd6c-4e9f7ef95be1.jpg""]",false,Transform ideas into breathtaking images,"Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!","[""creative""]",false,true
f11fc6e9-6166-4676-ac5d-f07127b270c1,c775f60d-b99f-418b-8fe0-53172258c3ce,youtube-transcription-scraper,YouTube Transcription Scraper,https://youtu.be/H8S3pU68lGE,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/65bce54b-0124-4b0d-9e3e-f9b89d0dc99e.jpg""]",false,Fetch the transcriptions from the most popular YouTube videos in your chosen topic,"Effortlessly gather transcriptions from multiple YouTube videos with this agent. It scrapes and compiles video transcripts into a clean, organized list, making it easy to extract insights, quotes, or content from various sources in one go. Ideal for researchers, content creators, and marketers looking to quickly analyze or repurpose video content.","[""writing""]",false,true
17908889-b599-4010-8e4f-bed19b8f3446,6e16e65a-ad34-4108-b4fd-4a23fced5ea2,business-ownerceo-finder,Decision Maker Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/1020d94e-b6a2-4fa7-bbdf-2c218b0de563.jpg""]",false,Contact CEOs today,"Find the key decision-makers you need, fast.

This agent identifies business owners or CEOs of local companies in any area you choose. Simply enter what kind of businesses you’re looking for and where, and it will:

* Search the area and gather public information
* Return names, roles, and contact details when available
* Provide smart Google search suggestions if details aren’t found

Perfect for:

* B2B sales teams seeking verified leads
* Recruiters sourcing local talent
* Researchers looking to connect with business leaders

Save hours of manual searching and get straight to the people who matter most.","[""business""]",true,true
72beca1d-45ea-4403-a7ce-e2af168ee428,415b7352-0dc6-4214-9d87-0ad3751b711d,smart-meeting-brief,Smart Meeting Prep,https://youtu.be/9ydZR2hkxaY,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2f116ce1-63ae-4d39-a5cd-f514defc2b97.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0a71a60a-2263-4f12-9836-9c76ab49f155.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/95327695-9184-403c-907a-a9d3bdafa6a5.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2bc77788-790b-47d4-8a61-ce97b695e9f5.png""]",true,Business meeting briefings delivered daily,"Never walk into a meeting unprepared again. Every day at 4 pm, the Smart Meeting Prep Agent scans your calendar for tomorrow's external meetings. It reviews your past email exchanges, researches each participant's background and role, and compiles the insights into a concise briefing, so you can close your workday ready for tomorrow's calls.

How It Works
1. At 4 pm, the agent scans your calendar and identifies external meetings scheduled for the next day.
2. It reviews recent email threads with each participant to surface key relationship history and communication context.
3. It conducts online research to gather publicly available information on roles, company backgrounds, and relevant professional data.
4. It produces a unified briefing for each participant, including past exchange highlights, profile notes, and strategic conversation points.","[""personal""]",true,true
9fa5697a-617b-4fae-aea0-7dbbed279976,b8ceb480-a7a2-4c90-8513-181a49f7071f,automated-support-ai,Automated Support Agent,https://youtu.be/nBMfu_5sgDA,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/ed56febc-2205-4179-9e7e-505d8500b66c.png""]",true,Automate up to 80 percent of inbound support emails,"Overview:
Support teams spend countless hours on basic tickets. This agent automates repetitive customer support tasks. It reads incoming requests, researches your knowledge base, and responds automatically when confident. When unsure, it escalates to a human for final resolution.

How it Works:
New support emails are routed to the agent.
The agent checks internal documentation for answers.
It measures confidence in the answer found and either replies directly or escalates to a human.

Business Value:
Automating the easy 80 percent of support tickets allows your team to focus on high-value, complex customer issues, improving efficiency and response times.","[""business""]",false,true
2bdac92b-a12c-4131-bb46-0e3b89f61413,31daf49d-31d3-476b-aa4c-099abc59b458,unspirational-poster-maker,Unspirational Poster Maker,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a490dac-27e5-405f-a4c4-8d1c55b85060.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d343fbb5-478c-4e38-94df-4337293b61f1.jpg""]",false,Because adulting is hard,"This witty AI agent generates hilariously relatable ""motivational"" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clichés and embrace our collective struggles to ""get it together."" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.","[""creative""]",false,true
9adf005e-2854-4cc7-98cf-f7103b92a7b7,a03b0d8c-4751-43d6-a54e-c3b7856ba4e3,ai-shortform-video-generator-create-viral-ready-content,AI Video Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/8d2670b9-fea5-4966-a597-0a4511bffdc3.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/aabe8aec-0110-4ce7-a259-4f86fe8fe07d.png""]",false,Create Viral-Ready Shorts Content in Seconds,"OVERVIEW
Transform any trending headline or broad topic into a polished, vertical short-form video in a single run.
The agent automates research, scriptwriting, metadata creation, and Revid.ai rendering, returning one ready-to-publish MP4 plus its title, script and hashtags.

HOW IT WORKS
1. Input a topic or an exact news headline.
2. The agent fetches live search results and selects the most engaging related story.
3. Key facts are summarised into concise research notes.
4. Claude writes a 30–35 second script with visual cues, a three-second hook, tension loops, and a call-to-action.
5. GPT-4o generates an eye-catching title and one or two discoverability hashtags.
6. The script is sent to a state-of-the-art AI video generator to render a single 9:16 MP4 (default: 720 p, 30 fps, voice “Brian”, style “movingImage”, music “Bladerunner 2049”).
– All voice, style and resolution settings can be adjusted in the Builder before you press ""Run"".
7. Output delivered: Title, Script, Hashtags, Video URL.

KEY USE CASES
- Broad-topic explainers (e.g. “Artificial Intelligence” or “Climate Tech”).
- Real-time newsjacking with a specific breaking headline.
- Product-launch spotlights and quick event recaps while interest is high.

BUSINESS VALUE
- One-click speed: from idea to finished video in minutes.
- Consistent brand look: Revid presets keep voice, style and aspect ratio on spec.
- No-code workflow: marketers create social video without design or development queues.
- Cloud convenience: Auto-GPT Cloud users are pre-configured with all required keys.
Self-hosted users simply add OpenAI, Anthropic, Perplexity (OpenRouter/Jina) and Revid keys once.

IMPORTANT NOTES
- The agent outputs exactly one video per execution. Run it again for additional shorts.
- Video rendering time varies; AI-generated footage may take several minutes.","[""writing""]",false,true
864e48ef-fee5-42c1-b6a4-2ae139db9fc1,55d40473-0f31-4ada-9e40-d3a7139fcbd4,automated-blog-writer,Automated SEO Blog Writer,https://youtu.be/nKcDCbDVobs,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/2dd5f95b-5b30-4bf8-a11b-bac776c5141a.jpg""]",true,"Automate research, writing, and publishing for high-ranking blog posts","Scale your blog with a fully automated content engine. The Automated SEO Blog Writer learns your brand voice, finds high-demand keywords, and creates SEO-optimized articles that attract organic traffic and boost visibility.

How it works:

1. Share your pitch, website, and values.
2. The agent studies your site and uncovers proven SEO opportunities.
3. It spends two hours researching and drafting each post.
4. You set the cadence—publishing runs on autopilot.

Business value: Consistently publish research-backed, optimized posts that build domain authority, rankings, and thought leadership while you focus on what matters most.

Use cases:
• Founders: Keep your blog active with no time drain.
• Agencies: Deliver scalable SEO content for clients.
• Strategists: Automate execution, focus on strategy.
• Marketers: Drive steady organic growth.
• Local businesses: Capture nearby search traffic.","[""writing""]",false,true
6046f42e-eb84-406f-bae0-8e052064a4fa,a548e507-09a7-4b30-909c-f63fcda10fff,lead-finder-local-businesses,Lead Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/abd6605f-d5f8-426b-af36-052e8ba5044f.webp""]",false,Auto-Prospect Like a Pro,"Turbo-charge your local lead generation with the AutoGPT Marketplace’s top Google Maps prospecting agent. “Lead Finder: Local Businesses” delivers verified, ready-to-contact prospects in any niche and city—so you can focus on closing, not searching.

**WHAT IT DOES**
• Searches Google Maps via the official API (no scraping)
• Prompts like “dentists in Chicago” or “coffee shops near me”
• Returns: Name, Website, Rating, Reviews, **Phone & Address**
• Exports instantly to your CRM, sheet, or outreach workflow

**WHY YOU’LL LOVE IT**
✓ Hyper-targeted leads in minutes
✓ Unlimited searches & locations
✓ Zero CAPTCHAs or IP blocks
✓ Works on AutoGPT Cloud or self-hosted (with your API key)
✓ Cut prospecting time by 90%

**PERFECT FOR**
— Marketers & PPC agencies
— SEO consultants & designers
— SaaS founders & sales teams

Stop scrolling directories—start filling your pipeline. Start now and let AI prospect while you profit.

→ Click *Add to Library* and own your market today.","[""business""]",true,true
f623c862-24e9-44fc-8ce8-d8282bb51ad2,eafa21d3-bf14-4f63-a97f-a5ee41df83b3,linkedin-post-generator,LinkedIn Post Generator,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/297f6a8e-81a8-43e2-b106-c7ad4a5662df.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/fceebdc1-aef6-4000-97fc-4ef587f56bda.png""]",false,Auto‑craft LinkedIn gold,"Create research‑driven, high‑impact LinkedIn posts in minutes. This agent searches YouTube for the best videos on your chosen topic, pulls their transcripts, and distils the most valuable insights into a polished post ready for your company page or personal feed.

FEATURES
• Automated YouTube research – discovers and analyses top‑ranked videos so you don’t have to
• AI‑curated synthesis – combines multiple transcripts into one authoritative narrative
• Full creative control – adjust style, tone, objective, opinion, clarity, target word count and number of videos
• LinkedIn‑optimised output – hook, 2‑3 key points, CTA, strategic line breaks, 3‑5 hashtags, no markdown
• One‑click publish – returns a ready‑to‑post text block (≤1 300 characters)

HOW IT WORKS
1. Enter a topic and your preferred writing parameters.
2. The agent builds a YouTube search, fetches the page, and extracts the top N video URLs.
3. It pulls each transcript, then feeds them—plus your settings—into Claude 3.5 Sonnet.
4. The model writes a concise, engaging post designed for maximum LinkedIn engagement.

USE CASES
• Thought‑leadership updates backed by fresh video research
• Rapid industry summaries after major events, webinars, or conferences
• Consistent LinkedIn content for busy founders, marketers, and creators

WHY YOU’LL LOVE IT
Save hours of manual research, avoid surface‑level hot‑takes, and publish posts that showcase real expertise—without the heavy lift.","[""writing""]",true,true
7d4120ad-b6b3-4419-8bdb-7dd7d350ef32,e7bb29a1-23c7-4fee-aa3b-5426174b8c52,youtube-to-linkedin-post-converter,YouTube to LinkedIn Post Converter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f084b326-a708-4396-be51-7ba59ad2ef32.png""]",false,Transform Your YouTube Videos into Engaging LinkedIn Posts with AI,"WHAT IT DOES:
This agent converts YouTube video content into a LinkedIn post by analyzing the video's transcript. It provides you with a tailored post that reflects the core ideas, key takeaways, and tone of the original video, optimizing it for engagement on LinkedIn.

HOW IT WORKS:
- You provide the URL to the YouTube video (required)
- You can choose the structure for the LinkedIn post (e.g., Personal Achievement Story, Lesson Learned, Thought Leadership, etc.)
- You can also select the tone (e.g., Inspirational, Analytical, Conversational, etc.)
- The transcript of the video is analyzed by the GPT-4 model and the Claude 3.5 Sonnet model
- The models extract key insights, memorable quotes, and the main points from the video
- You’ll receive a LinkedIn post, formatted according to your chosen structure and tone, optimized for professional engagement

INPUTS:
- Source YouTube Video – Provide the URL to the YouTube video
- Structure – Choose the post format (e.g., Personal Achievement Story, Thought Leadership, etc.)
- Content – Specify the main message or idea of the post (e.g., Hot Take, Key Takeaways, etc.)
- Tone – Select the tone for the post (e.g., Conversational, Inspirational, etc.)

OUTPUT:
- LinkedIn Post – A well-crafted, AI-generated LinkedIn post with a professional tone, based on the video content and your specified preferences

Perfect for content creators, marketers, and professionals who want to repurpose YouTube videos for LinkedIn and boost their professional branding.","[""writing""]",false,true
c61d6a83-ea48-4df8-b447-3da2d9fe5814,00fdd42c-a14c-4d19-a567-65374ea0e87f,personalized-morning-coffee-newsletter,Personal Newsletter,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/f4b38e4c-8166-4caf-9411-96c9c4c82d4c.png""]",false,Start your day with personalized AI newsletters that deliver credibility and context for every interest or mood.,"This Personal Newsletter Agent provides a bespoke daily digest on your favorite topics and tone. Whether you prefer industry insights, lighthearted reads, or breaking news, this agent crafts your own unique newsletter to keep you informed and entertained.


How It Works
1. Enter your favorite topics, industries, or areas of interest.
2. Choose your tone—professional, casual, or humorous.
3. Set your preferred delivery cadence: daily or weekly.
4. The agent scans top sources and compiles 3–5 engaging stories, insights, and fun facts into a conversational newsletter.

Skip the morning scroll and enjoy a thoughtfully curated newsletter designed just for you. Stay ahead of trends, spark creative ideas, and enjoy an effortless, informed start to your day.


Use Cases
• Executives: Get a daily digest of market updates and leadership insights.
• Marketers: Receive curated creative trends and campaign inspiration.
• Entrepreneurs: Stay updated on your industry without information overload.","[""research""]",true,true
e2e49cfc-4a39-4d62-a6b3-c095f6d025ff,fc2c9976-0962-4625-a27b-d316573a9e7f,email-address-finder,Email Scout - Contact Finder Assistant,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/da8a690a-7a8b-4c1d-b6f8-e2f840c0205d.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/6a2ac25c-1609-4881-8140-e6da2421afb3.jpg"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/26179263-fe06-45bd-b6a0-0754660a0a46.jpg""]",false,Find contact details from name and location using AI search,"Finding someone's professional email address can be time-consuming and frustrating. Manual searching across multiple websites, social profiles, and business directories often leads to dead ends or outdated information.

Email Scout automates this process by intelligently searching across publicly available sources when you provide a person's name and location. Simply input basic information like ""Tim Cook, USA"" or ""Sarah Smith, London"" and let the AI assistant do the work of finding potential contact details.

Key Features:
- Quick search from just name and location
- Scans multiple public sources
- Automated AI-powered search process
- Easy to use with simple inputs

Perfect for recruiters, business development professionals, researchers, and anyone needing to establish professional contact.

Note: This tool searches only publicly available information. Search results depend on what contact information people have made public. Some searches may not yield results if the information isn't publicly accessible.","[""""]",false,true
81bcc372-0922-4a36-bc35-f7b1e51d6939,e437cc95-e671-489d-b915-76561fba8c7f,ai-youtube-to-blog-converter,YouTube Video to SEO Blog Writer,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/239e5a41-2515-4e1c-96ef-31d0d37ecbeb.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/c7d96966-786f-4be6-ad7d-3a51c84efc0e.png"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/0275a74c-e2c2-4e29-a6e4-3a616c3c35dd.png""]",false,One link. One click. One powerful blog post.,"Effortlessly transform your YouTube videos into high-quality, SEO-optimized blog posts.

Your videos deserve a second life—in writing.
Make your content work twice as hard by repurposing it into engaging, searchable articles.

Perfect for content creators, marketers, and bloggers, this tool analyzes video content and generates well-structured blog posts tailored to your tone, audience, and word count. Just paste a YouTube URL and let the AI handle the rest.

FEATURES

• CONTENT ANALYSIS
Extracts key points from the video while preserving your message and intent.

• CUSTOMIZABLE OUTPUT
Select a tone that fits your audience: casual, professional, educational, or formal.

• SEO OPTIMIZATION
Automatically creates engaging titles and structured subheadings for better search visibility.

• USER-FRIENDLY
Repurpose your videos into written content to expand your reach and improve accessibility.

Whether you're looking to grow your blog, boost SEO, or simply get more out of your content, the AI YouTube-to-Blog Converter makes it effortless.
","[""writing""]",true,true
5c3510d2-fc8b-4053-8e19-67f53c86eb1a,f2cc74bb-f43f-4395-9c35-ecb30b5b4fc9,ai-webpage-copy-improver,AI Webpage Copy Improver,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/d562d26f-5891-4b09-8859-fbb205972313.jpg""]",false,Boost Your Website's Search Engine Performance,"Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.","[""marketing""]",true,true
94d03bd3-7d44-4d47-b60c-edb2f89508d6,b6f6f0d3-49f4-4e3b-8155-ffe9141b32c0,domain-name-finder,Domain Name Finder,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/28545e09-b2b8-4916-b4c6-67f982510a78.jpeg""]",false,Instantly generate brand-ready domain names that are actually available,"Overview:
Finding a domain name that fits your brand shouldn’t take hours of searching and failed checks. The Domain Name Finder Agent turns your pitch into hundreds of creative, brand-ready domain ideas—filtered by live availability so every result is actionable.

How It Works
1. Input your product pitch, company name, or core keywords.
2. The agent analyzes brand tone, audience, and industry context.
3. It generates a list of unique, memorable domains that match your criteria.
4. All names are pre-filtered for real-time availability, so you can register immediately.


Business Value
Save hours of guesswork and eliminate dead ends. Accelerate brand launches, startup naming, and campaign creation with ready-to-claim domains.


Key Use Cases
• Startup Founders: Quickly find brand-ready domains for MVP launches or rebrands.
• Marketers: Test name options across campaigns with instant availability data.
• Entrepreneurs: Validate ideas faster with instant domain options.","[""business""]",false,true
7a831906-daab-426f-9d66-bcf98d869426,516d813b-d1bc-470f-add7-c63a4b2c2bad,ai-function,AI Function,,"[""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/620e8117-2ee1-4384-89e6-c2ef4ec3d9c9.webp"",""https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/476259e2-5a79-4a7b-8e70-deeebfca70d7.png""]",false,Never Code Again,"AI FUNCTION MAGIC
Your AI‑powered assistant for turning plain‑English descriptions into working Python functions.

HOW IT WORKS
1. Describe what the function should do.
2. Specify the inputs it needs.
3. Receive the generated Python code.

FEATURES
- Effortless Function Generation: convert natural‑language specs into complete functions.
- Customizable Inputs: define the parameters that matter to you.
- Versatile Use Cases: simulate data, automate tasks, prototype ideas.
- Seamless Integration: add the generated function directly to your codebase.

EXAMPLE
Request: “Create a function that generates 20 examples of fake people, each with a name, date of birth, job title, and age.”
Input parameter: number_of_people (default 20)
Result: a list of dictionaries such as
[
{ ""name"": ""Emma Martinez"", ""date_of_birth"": ""1992‑11‑03"", ""job_title"": ""Data Analyst"", ""age"": 32 },
{ ""name"": ""Liam O’Connor"", ""date_of_birth"": ""1985‑07‑19"", ""job_title"": ""Marketing Manager"", ""age"": 39 },
…18 more entries…
]","[""development""]",false,true
File diff suppressed because it is too large
@@ -0,0 +1,590 @@
{
  "id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
  "version": 29,
  "is_active": false,
  "name": "Unspirational Poster Maker",
  "description": "This witty AI agent generates hilariously relatable \"motivational\" posters that tackle the everyday struggles of procrastination, overthinking, and workplace chaos with a blend of absurdity and sarcasm. From goldfish facing impossible tasks to cats in existential crises, The Unspirational Poster Maker designs tongue-in-cheek graphics and captions that mock productivity clich\u00e9s and embrace our collective struggles to \"get it together.\" Perfect for adding a touch of humour to the workday, these posters remind us that sometimes, all we can do is laugh at the chaos.",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "Generated Image",
        "description": "The resulting generated image ready for you to review and post."
      },
      "metadata": {
        "position": {
          "x": 2329.937006807125,
          "y": 80.49068076698347
        }
      },
      "input_links": [
        {
          "id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
          "source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "20845dda-91de-4508-8077-0504b1a5ae03",
          "source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "6524c611-774b-45e9-899d-9a6aa80c549c",
          "source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        },
        {
          "id": "714a0821-e5ba-4af7-9432-50491adda7b1",
          "source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "input_default": {
        "name": "Theme",
        "value": "Cooking"
      },
      "metadata": {
        "position": {
          "x": -1219.5966324967521,
          "y": 80.50339731789956
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
          "source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
          "sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "source_name": "result",
          "sink_name": "prompt_values_#_THEME",
          "is_static": true
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "28bda769-b88b-44c9-be5c-52c2667f137e",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 1132.373897280427,
          "y": 88.44610377514573
        }
      },
      "input_links": [
        {
          "id": "54588c74-e090-4e49-89e4-844b9952a585",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "20845dda-91de-4508-8077-0504b1a5ae03",
          "source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 590.7543882245375,
          "y": 85.69546832466654
        }
      },
      "input_links": [
        {
          "id": "66646786-3006-4417-a6b7-0158f2603d1d",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "6524c611-774b-45e9-899d-9a6aa80c549c",
          "source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "576c5677-9050-4d1c-aad4-36b820c04fef",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 60.48904654237981,
          "y": 86.06183359510214
        }
      },
      "input_links": [
        {
          "id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "714a0821-e5ba-4af7-9432-50491adda7b1",
          "source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
      "block_id": "6ab085e2-20b3-4055-bc3e-08036e01eca6",
      "input_default": {
        "prompt": "A cat sprawled dramatically across an important-looking document during a work-from-home meeting, making direct eye contact with the camera while knocking over a coffee mug in slow motion. Text Overlay: \"Chaos is a career path. Be the obstacle everyone has to work around.\"",
        "upscale": "No Upscale"
      },
      "metadata": {
        "position": {
          "x": 1668.3572666956795,
          "y": 89.69665262457966
        }
      },
      "input_links": [
        {
          "id": "509b7587-1940-4a06-808d-edde9a74f400",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "output_links": [
        {
          "id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
          "source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
          "source_name": "result",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "gpt-4o",
        "prompt": "<example_output>\nA photo of a sloth lounging on a desk, with its head resting on a keyboard. The keyboard is on top of a laptop with a blank spreadsheet open. A to-do list is placed beside the laptop, with the top item written as \"Do literally anything\". There is a text overlay that says \"If you can't outwork them, outnap them.\".\n</example_output>\n\nCreate a relatable satirical, snarky, user-deprecating motivational style image based on the theme: \"{{THEME}}\".\n\nOutput only the image description and caption, without any additional commentary or formatting.",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": -561.1139207164056,
          "y": 78.60434452403524
        }
      },
      "input_links": [
        {
          "id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
          "source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
          "sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "source_name": "result",
          "sink_name": "prompt_values_#_THEME",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "54588c74-e090-4e49-89e4-844b9952a585",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        },
        {
          "id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        },
        {
          "id": "509b7587-1940-4a06-808d-edde9a74f400",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        },
        {
          "id": "66646786-3006-4417-a6b7-0158f2603d1d",
          "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
          "sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
          "source_name": "response",
          "sink_name": "prompt",
          "is_static": false
        }
      ],
      "graph_id": "7b2e2095-782a-4f8d-adda-e62b661bccf5",
      "graph_version": 29,
      "webhook_id": null,
      "webhook": null
    }
  ],
  "links": [
    {
      "id": "66646786-3006-4417-a6b7-0158f2603d1d",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    },
    {
      "id": "c6c511e8-e6a4-4969-9bc8-f67d60c1e229",
      "source_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "6524c611-774b-45e9-899d-9a6aa80c549c",
      "source_id": "e7cdc1a2-4427-4a8a-a31b-63c8e74842f8",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "20845dda-91de-4508-8077-0504b1a5ae03",
      "source_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "8c2bd1f7-b17b-4835-81b6-bb336097aa7a",
      "source_id": "7e026d19-f9a6-412f-8082-610f9ba0c410",
      "sink_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "source_name": "result",
      "sink_name": "prompt_values_#_THEME",
      "is_static": true
    },
    {
      "id": "201d3e03-bc06-4cee-846d-4c3c804d8857",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    },
    {
      "id": "714a0821-e5ba-4af7-9432-50491adda7b1",
      "source_id": "576c5677-9050-4d1c-aad4-36b820c04fef",
      "sink_id": "5ac3727a-1ea7-436b-a902-ef1bfd883a30",
      "source_name": "result",
      "sink_name": "value",
      "is_static": false
    },
    {
      "id": "54588c74-e090-4e49-89e4-844b9952a585",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "28bda769-b88b-44c9-be5c-52c2667f137e",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    },
    {
      "id": "509b7587-1940-4a06-808d-edde9a74f400",
      "source_id": "7543b9b0-0409-4cf8-bc4e-e0336273e2c4",
      "sink_id": "86665e90-ffbf-48fb-ad3f-e5d31fd50c51",
      "source_name": "response",
      "sink_name": "prompt",
      "is_static": false
    }
  ],
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"sub_graphs": [],
|
||||
"user_id": "",
|
||||
"created_at": "2024-12-20T19:58:34.390Z",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Theme": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Theme",
|
||||
"default": "Cooking"
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
},
|
||||
"output_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Generated Image": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Generated Image",
|
||||
"description": "The resulting generated image ready for you to review and post."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"Generated Image"
|
||||
]
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"trigger_setup_info": null,
|
||||
"credentials_input_schema": {
|
||||
"properties": {
|
||||
"ideogram_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"ideogram"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "ideogram",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.IDEOGRAM: 'ideogram'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator_values": []
|
||||
},
|
||||
"openai_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"openai"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "openai",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator": "model",
|
||||
"discriminator_mapping": {
|
||||
"Llama-3.3-70B-Instruct": "llama_api",
|
||||
"Llama-3.3-8B-Instruct": "llama_api",
|
||||
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
|
||||
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
|
||||
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
|
||||
"amazon/nova-lite-v1": "open_router",
|
||||
"amazon/nova-micro-v1": "open_router",
|
||||
"amazon/nova-pro-v1": "open_router",
|
||||
"claude-3-7-sonnet-20250219": "anthropic",
|
||||
"claude-3-haiku-20240307": "anthropic",
|
||||
"claude-haiku-4-5-20251001": "anthropic",
|
||||
"claude-opus-4-1-20250805": "anthropic",
|
||||
"claude-opus-4-20250514": "anthropic",
|
||||
"claude-opus-4-5-20251101": "anthropic",
|
||||
"claude-sonnet-4-20250514": "anthropic",
|
||||
"claude-sonnet-4-5-20250929": "anthropic",
|
||||
"cohere/command-r-08-2024": "open_router",
|
||||
"cohere/command-r-plus-08-2024": "open_router",
|
||||
"deepseek/deepseek-chat": "open_router",
|
||||
"deepseek/deepseek-r1-0528": "open_router",
|
||||
"dolphin-mistral:latest": "ollama",
|
||||
"google/gemini-2.0-flash-001": "open_router",
|
||||
"google/gemini-2.0-flash-lite-001": "open_router",
|
||||
"google/gemini-2.5-flash": "open_router",
|
||||
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
|
||||
"google/gemini-2.5-pro-preview-03-25": "open_router",
|
||||
"google/gemini-3-pro-preview": "open_router",
|
||||
"gpt-3.5-turbo": "openai",
|
||||
"gpt-4-turbo": "openai",
|
||||
"gpt-4.1-2025-04-14": "openai",
|
||||
"gpt-4.1-mini-2025-04-14": "openai",
|
||||
"gpt-4o": "openai",
|
||||
"gpt-4o-mini": "openai",
|
||||
"gpt-5-2025-08-07": "openai",
|
||||
"gpt-5-chat-latest": "openai",
|
||||
"gpt-5-mini-2025-08-07": "openai",
|
||||
"gpt-5-nano-2025-08-07": "openai",
|
||||
"gpt-5.1-2025-11-13": "openai",
|
||||
"gryphe/mythomax-l2-13b": "open_router",
|
||||
"llama-3.1-8b-instant": "groq",
|
||||
"llama-3.3-70b-versatile": "groq",
|
||||
"llama3": "ollama",
|
||||
"llama3.1:405b": "ollama",
|
||||
"llama3.2": "ollama",
|
||||
"llama3.3": "ollama",
|
||||
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/llama-4-maverick": "open_router",
|
||||
"meta-llama/llama-4-scout": "open_router",
|
||||
"microsoft/wizardlm-2-8x22b": "open_router",
|
||||
"mistralai/mistral-nemo": "open_router",
|
||||
"moonshotai/kimi-k2": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
|
||||
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
|
||||
"o1": "openai",
|
||||
"o1-mini": "openai",
|
||||
"o3-2025-04-16": "openai",
|
||||
"o3-mini": "openai",
|
||||
"openai/gpt-oss-120b": "open_router",
|
||||
"openai/gpt-oss-20b": "open_router",
|
||||
"perplexity/sonar": "open_router",
|
||||
"perplexity/sonar-deep-research": "open_router",
|
||||
"perplexity/sonar-pro": "open_router",
|
||||
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
|
||||
"qwen/qwen3-coder": "open_router",
|
||||
"v0-1.0-md": "v0",
|
||||
"v0-1.5-lg": "v0",
|
||||
"v0-1.5-md": "v0",
|
||||
"x-ai/grok-4": "open_router",
|
||||
"x-ai/grok-4-fast": "open_router",
|
||||
"x-ai/grok-4.1-fast": "open_router",
|
||||
"x-ai/grok-code-fast-1": "open_router"
|
||||
},
|
||||
"discriminator_values": [
|
||||
"gpt-4o"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"ideogram_api_key_credentials",
|
||||
"openai_api_key_credentials"
|
||||
],
|
||||
"title": "UnspirationalPosterMakerCredentialsInputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
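For orientation, each entry in the graph's "links" array wires one node's output pin to another node's input pin. A short editorial sketch (Python; the file name is hypothetical, everything else mirrors the JSON above) of how to see the fan-out:

    import json
    from collections import defaultdict

    # Load the agent-graph JSON shown above (hypothetical file name).
    with open("unspirational_poster_maker.json", encoding="utf-8") as f:
        graph = json.load(f)

    blocks = {n["id"]: n["block_id"] for n in graph["nodes"]}
    fan_out = defaultdict(list)
    for link in graph["links"]:
        fan_out[link["source_id"]].append((link["source_name"], link["sink_id"][:8], link["sink_name"]))

    # The one LLM node (block 1f292d4a-...) fans its "response" out to four
    # image-generation nodes (block 6ab085e2-...), whose "result" pins all
    # converge on the single "Generated Image" output node.
    for source_id, sinks in fan_out.items():
        print(blocks[source_id], "->", sinks)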
File diff suppressed because it is too large
@@ -0,0 +1,447 @@
{
  "id": "622849a7-5848-4838-894d-01f8f07e3fad",
  "version": 18,
  "is_active": true,
  "name": "AI Function",
  "description": "## AI-Powered Function Magic: Never code again!\nProvide a description of a python function and your inputs and AI will provide the results.",
  "instructions": null,
  "recommended_schedule_cron": null,
  "nodes": [
    {
      "id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "input_default": {
        "name": "return",
        "title": null,
        "value": null,
        "format": "",
        "secret": false,
        "advanced": false,
        "description": "The value returned by the function"
      },
      "metadata": {
        "position": {
          "x": 1598.8622921127233,
          "y": 291.59140862204725
        }
      },
      "input_links": [
        {
          "id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
          "source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "output_links": [],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "input_default": {
        "model": "o3-mini",
        "retry": 3,
        "prompt": "{{ARGS}}",
        "sys_prompt": "You are now the following python function:\n\n```\n# {{DESCRIPTION}}\n{{FUNCTION}}\n```\n\nThe user will provide your input arguments.\nOnly respond with your `return` value.\nDo not include any commentary or additional text in your response. \nDo not include ``` backticks or any other decorators.",
        "ollama_host": "localhost:11434",
        "prompt_values": {}
      },
      "metadata": {
        "position": {
          "x": 995,
          "y": 290.50000000000006
        }
      },
      "input_links": [
        {
          "id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
          "source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_FUNCTION",
          "is_static": true
        },
        {
          "id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
          "source_id": "844530de-2354-46d8-b748-67306b7bbca1",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_ARGS",
          "is_static": true
        },
        {
          "id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
          "source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_DESCRIPTION",
          "is_static": true
        }
      ],
      "output_links": [
        {
          "id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
          "source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
          "source_name": "response",
          "sink_name": "value",
          "is_static": false
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
      "block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
      "input_default": {
        "name": "Function Definition",
        "title": null,
        "value": "def fake_people(n: int) -> list[dict]:",
        "secret": false,
        "advanced": false,
        "description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
        "placeholder_values": []
      },
      "metadata": {
        "position": {
          "x": -672.6908629664215,
          "y": 302.42044359789116
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
          "source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_FUNCTION",
          "is_static": true
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "844530de-2354-46d8-b748-67306b7bbca1",
      "block_id": "7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
      "input_default": {
        "name": "Arguments",
        "title": null,
        "value": "20",
        "secret": false,
        "advanced": false,
        "description": "The function's inputs\n\ne.g \"20\"",
        "placeholder_values": []
      },
      "metadata": {
        "position": {
          "x": -158.1623599617334,
          "y": 295.410856928333
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
          "source_id": "844530de-2354-46d8-b748-67306b7bbca1",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_ARGS",
          "is_static": true
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    },
    {
      "id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
      "block_id": "90a56ffb-7024-4b2b-ab50-e26c5e5ab8ba",
      "input_default": {
        "name": "Description",
        "title": null,
        "value": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.",
        "secret": false,
        "advanced": false,
        "description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
        "placeholder_values": []
      },
      "metadata": {
        "position": {
          "x": 374.4548658057796,
          "y": 290.3779121974126
        }
      },
      "input_links": [],
      "output_links": [
        {
          "id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
          "source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
          "sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
          "source_name": "result",
          "sink_name": "prompt_values_#_DESCRIPTION",
          "is_static": true
        }
      ],
      "graph_id": "622849a7-5848-4838-894d-01f8f07e3fad",
      "graph_version": 18,
      "webhook_id": null,
      "webhook": null
    }
  ],
"links": [
|
||||
{
|
||||
"id": "caecc1de-fdbc-4fd9-9570-074057bb15f9",
|
||||
"source_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"sink_id": "26ff2973-3f9a-451d-b902-d45e5da0a7fe",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "6c63d8ee-b63d-4ff6-bae0-7db8f99bb7af",
|
||||
"source_id": "0fd6ef54-c1cd-478d-b764-17e40f882b99",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_DESCRIPTION",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "093bdca5-9f44-42f9-8e1c-276dd2971675",
|
||||
"source_id": "844530de-2354-46d8-b748-67306b7bbca1",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_ARGS",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "dc7cb15f-76cc-4533-b96c-dd9e3f7f75ed",
|
||||
"source_id": "4eab3a55-20f2-4c1d-804c-7377ba8202d2",
|
||||
"sink_id": "c5d16ee4-de9e-4d93-bf32-ac2d15760d5b",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_FUNCTION",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"sub_graphs": [],
|
||||
"user_id": "",
|
||||
"created_at": "2025-04-19T17:10:48.857Z",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Function Definition": {
|
||||
"advanced": false,
|
||||
"anyOf": [
|
||||
{
|
||||
"format": "short-text",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"secret": false,
|
||||
"title": "Function Definition",
|
||||
"description": "The function definition (text). This is what you would type on the first line of the function when programming.\n\ne.g \"def fake_people(n: int) -> list[dict]:\"",
|
||||
"default": "def fake_people(n: int) -> list[dict]:"
|
||||
},
|
||||
"Arguments": {
|
||||
"advanced": false,
|
||||
"anyOf": [
|
||||
{
|
||||
"format": "short-text",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"secret": false,
|
||||
"title": "Arguments",
|
||||
"description": "The function's inputs\n\ne.g \"20\"",
|
||||
"default": "20"
|
||||
},
|
||||
"Description": {
|
||||
"advanced": false,
|
||||
"anyOf": [
|
||||
{
|
||||
"format": "long-text",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"secret": false,
|
||||
"title": "Description",
|
||||
"description": "Describe what the function does.\n\ne.g \"Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age.\"",
|
||||
"default": "Generates n examples of fake data representing people, each with a name, DoB, Job title, and an age."
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
},
|
||||
"output_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"return": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "return",
|
||||
"description": "The value returned by the function"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"return"
|
||||
]
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"trigger_setup_info": null,
|
||||
"credentials_input_schema": {
|
||||
"properties": {
|
||||
"openai_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"openai"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "openai",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator": "model",
|
||||
"discriminator_mapping": {
|
||||
"Llama-3.3-70B-Instruct": "llama_api",
|
||||
"Llama-3.3-8B-Instruct": "llama_api",
|
||||
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
|
||||
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
|
||||
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
|
||||
"amazon/nova-lite-v1": "open_router",
|
||||
"amazon/nova-micro-v1": "open_router",
|
||||
"amazon/nova-pro-v1": "open_router",
|
||||
"claude-3-7-sonnet-20250219": "anthropic",
|
||||
"claude-3-haiku-20240307": "anthropic",
|
||||
"claude-haiku-4-5-20251001": "anthropic",
|
||||
"claude-opus-4-1-20250805": "anthropic",
|
||||
"claude-opus-4-20250514": "anthropic",
|
||||
"claude-opus-4-5-20251101": "anthropic",
|
||||
"claude-sonnet-4-20250514": "anthropic",
|
||||
"claude-sonnet-4-5-20250929": "anthropic",
|
||||
"cohere/command-r-08-2024": "open_router",
|
||||
"cohere/command-r-plus-08-2024": "open_router",
|
||||
"deepseek/deepseek-chat": "open_router",
|
||||
"deepseek/deepseek-r1-0528": "open_router",
|
||||
"dolphin-mistral:latest": "ollama",
|
||||
"google/gemini-2.0-flash-001": "open_router",
|
||||
"google/gemini-2.0-flash-lite-001": "open_router",
|
||||
"google/gemini-2.5-flash": "open_router",
|
||||
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
|
||||
"google/gemini-2.5-pro-preview-03-25": "open_router",
|
||||
"google/gemini-3-pro-preview": "open_router",
|
||||
"gpt-3.5-turbo": "openai",
|
||||
"gpt-4-turbo": "openai",
|
||||
"gpt-4.1-2025-04-14": "openai",
|
||||
"gpt-4.1-mini-2025-04-14": "openai",
|
||||
"gpt-4o": "openai",
|
||||
"gpt-4o-mini": "openai",
|
||||
"gpt-5-2025-08-07": "openai",
|
||||
"gpt-5-chat-latest": "openai",
|
||||
"gpt-5-mini-2025-08-07": "openai",
|
||||
"gpt-5-nano-2025-08-07": "openai",
|
||||
"gpt-5.1-2025-11-13": "openai",
|
||||
"gryphe/mythomax-l2-13b": "open_router",
|
||||
"llama-3.1-8b-instant": "groq",
|
||||
"llama-3.3-70b-versatile": "groq",
|
||||
"llama3": "ollama",
|
||||
"llama3.1:405b": "ollama",
|
||||
"llama3.2": "ollama",
|
||||
"llama3.3": "ollama",
|
||||
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/llama-4-maverick": "open_router",
|
||||
"meta-llama/llama-4-scout": "open_router",
|
||||
"microsoft/wizardlm-2-8x22b": "open_router",
|
||||
"mistralai/mistral-nemo": "open_router",
|
||||
"moonshotai/kimi-k2": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
|
||||
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
|
||||
"o1": "openai",
|
||||
"o1-mini": "openai",
|
||||
"o3-2025-04-16": "openai",
|
||||
"o3-mini": "openai",
|
||||
"openai/gpt-oss-120b": "open_router",
|
||||
"openai/gpt-oss-20b": "open_router",
|
||||
"perplexity/sonar": "open_router",
|
||||
"perplexity/sonar-deep-research": "open_router",
|
||||
"perplexity/sonar-pro": "open_router",
|
||||
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
|
||||
"qwen/qwen3-coder": "open_router",
|
||||
"v0-1.0-md": "v0",
|
||||
"v0-1.5-lg": "v0",
|
||||
"v0-1.5-md": "v0",
|
||||
"x-ai/grok-4": "open_router",
|
||||
"x-ai/grok-4-fast": "open_router",
|
||||
"x-ai/grok-4.1-fast": "open_router",
|
||||
"x-ai/grok-code-fast-1": "open_router"
|
||||
},
|
||||
"discriminator_values": [
|
||||
"o3-mini"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"openai_api_key_credentials"
|
||||
],
|
||||
"title": "AIFunctionCredentialsInputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,403 @@
|
||||
{
|
||||
"id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
|
||||
"version": 12,
|
||||
"is_active": true,
|
||||
"name": "Flux AI Image Generator",
|
||||
"description": "Transform ideas into breathtaking images with this AI-powered Image Generator. Using cutting-edge Flux AI technology, the tool crafts highly detailed, photorealistic visuals from simple text prompts. Perfect for artists, marketers, and content creators, this generator produces unique images tailored to user specifications. From fantastical scenes to lifelike portraits, users can unleash creativity with professional-quality results in seconds. Easy to use and endlessly versatile, bring imagination to life with the AI Image Generator today!",
|
||||
"instructions": null,
|
||||
"recommended_schedule_cron": null,
|
||||
"nodes": [
|
||||
{
|
||||
"id": "7482c59d-725f-4686-82b9-0dfdc4e92316",
|
||||
"block_id": "cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
|
||||
"input_default": {
|
||||
"text": "Press the \"Advanced\" toggle and input your replicate API key.\n\nYou can get one here:\nhttps://replicate.com/account/api-tokens\n"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 872.8268131538296,
|
||||
"y": 614.9436919065381
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [],
|
||||
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
|
||||
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
"input_default": {
|
||||
"name": "Generated Image"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1453.6844137728922,
|
||||
"y": 963.2466395125115
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
|
||||
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
|
||||
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [],
|
||||
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
|
||||
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
|
||||
"input_default": {
|
||||
"name": "Image Subject",
|
||||
"value": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks.",
|
||||
"description": "The subject of the image"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -314.43009631839783,
|
||||
"y": 962.935949165938
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
|
||||
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
|
||||
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_TOPIC",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
|
||||
"block_id": "90f8c45e-e983-4644-aa0b-b4ebe2f531bc",
|
||||
"input_default": {
|
||||
"prompt": "dog",
|
||||
"output_format": "png",
|
||||
"replicate_model_name": "Flux Pro 1.1"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 873.0119949791526,
|
||||
"y": 966.1604399052493
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
|
||||
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
|
||||
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
|
||||
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
|
||||
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
|
||||
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
"input_default": {
|
||||
"model": "gpt-4o-mini",
|
||||
"prompt": "Generate an incredibly detailed, photorealistic image prompt about {{TOPIC}}, describing the camera it's taken with and prompting the diffusion model to use all the best quality techniques.\n\nOutput only the prompt with no additional commentary.",
|
||||
"prompt_values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 277.3057034159709,
|
||||
"y": 962.8382498113764
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
|
||||
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
|
||||
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_TOPIC",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
|
||||
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
|
||||
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "ed2091cf-5b27-45a9-b3ea-42396f95b256",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
{
|
||||
"id": "1077c61a-a32a-4ed7-becf-11bcf835b914",
|
||||
"source_id": "6f24c45f-1548-4eda-9784-da06ce0abef8",
|
||||
"sink_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_TOPIC",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "06665d23-2f3d-4445-8f22-573446fcff5b",
|
||||
"source_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
|
||||
"sink_id": "0d1dec1a-e4ee-4349-9673-449a01bbf14e",
|
||||
"source_name": "result",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "a17ec505-9377-4700-8fe0-124ca81d43a9",
|
||||
"source_id": "0d1bca9a-d9b8-4bfd-a19c-fe50b54f4b12",
|
||||
"sink_id": "50bc23e9-f2b7-4959-8710-99679ed9eeea",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"sub_graphs": [],
|
||||
"user_id": "",
|
||||
"created_at": "2024-12-20T18:46:11.492Z",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Image Subject": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Image Subject",
|
||||
"description": "The subject of the image",
|
||||
"default": "Otto the friendly, purple \"Chief Automation Octopus\" helping people automate their tedious tasks."
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
},
|
||||
"output_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Generated Image": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Generated Image"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"Generated Image"
|
||||
]
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"trigger_setup_info": null,
|
||||
"credentials_input_schema": {
|
||||
"properties": {
|
||||
"replicate_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"replicate"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "replicate",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.REPLICATE: 'replicate'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator_values": []
|
||||
},
|
||||
"openai_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"openai"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "openai",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator": "model",
|
||||
"discriminator_mapping": {
|
||||
"Llama-3.3-70B-Instruct": "llama_api",
|
||||
"Llama-3.3-8B-Instruct": "llama_api",
|
||||
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
|
||||
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
|
||||
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
|
||||
"amazon/nova-lite-v1": "open_router",
|
||||
"amazon/nova-micro-v1": "open_router",
|
||||
"amazon/nova-pro-v1": "open_router",
|
||||
"claude-3-7-sonnet-20250219": "anthropic",
|
||||
"claude-3-haiku-20240307": "anthropic",
|
||||
"claude-haiku-4-5-20251001": "anthropic",
|
||||
"claude-opus-4-1-20250805": "anthropic",
|
||||
"claude-opus-4-20250514": "anthropic",
|
||||
"claude-opus-4-5-20251101": "anthropic",
|
||||
"claude-sonnet-4-20250514": "anthropic",
|
||||
"claude-sonnet-4-5-20250929": "anthropic",
|
||||
"cohere/command-r-08-2024": "open_router",
|
||||
"cohere/command-r-plus-08-2024": "open_router",
|
||||
"deepseek/deepseek-chat": "open_router",
|
||||
"deepseek/deepseek-r1-0528": "open_router",
|
||||
"dolphin-mistral:latest": "ollama",
|
||||
"google/gemini-2.0-flash-001": "open_router",
|
||||
"google/gemini-2.0-flash-lite-001": "open_router",
|
||||
"google/gemini-2.5-flash": "open_router",
|
||||
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
|
||||
"google/gemini-2.5-pro-preview-03-25": "open_router",
|
||||
"google/gemini-3-pro-preview": "open_router",
|
||||
"gpt-3.5-turbo": "openai",
|
||||
"gpt-4-turbo": "openai",
|
||||
"gpt-4.1-2025-04-14": "openai",
|
||||
"gpt-4.1-mini-2025-04-14": "openai",
|
||||
"gpt-4o": "openai",
|
||||
"gpt-4o-mini": "openai",
|
||||
"gpt-5-2025-08-07": "openai",
|
||||
"gpt-5-chat-latest": "openai",
|
||||
"gpt-5-mini-2025-08-07": "openai",
|
||||
"gpt-5-nano-2025-08-07": "openai",
|
||||
"gpt-5.1-2025-11-13": "openai",
|
||||
"gryphe/mythomax-l2-13b": "open_router",
|
||||
"llama-3.1-8b-instant": "groq",
|
||||
"llama-3.3-70b-versatile": "groq",
|
||||
"llama3": "ollama",
|
||||
"llama3.1:405b": "ollama",
|
||||
"llama3.2": "ollama",
|
||||
"llama3.3": "ollama",
|
||||
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/llama-4-maverick": "open_router",
|
||||
"meta-llama/llama-4-scout": "open_router",
|
||||
"microsoft/wizardlm-2-8x22b": "open_router",
|
||||
"mistralai/mistral-nemo": "open_router",
|
||||
"moonshotai/kimi-k2": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
|
||||
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
|
||||
"o1": "openai",
|
||||
"o1-mini": "openai",
|
||||
"o3-2025-04-16": "openai",
|
||||
"o3-mini": "openai",
|
||||
"openai/gpt-oss-120b": "open_router",
|
||||
"openai/gpt-oss-20b": "open_router",
|
||||
"perplexity/sonar": "open_router",
|
||||
"perplexity/sonar-deep-research": "open_router",
|
||||
"perplexity/sonar-pro": "open_router",
|
||||
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
|
||||
"qwen/qwen3-coder": "open_router",
|
||||
"v0-1.0-md": "v0",
|
||||
"v0-1.5-lg": "v0",
|
||||
"v0-1.5-md": "v0",
|
||||
"x-ai/grok-4": "open_router",
|
||||
"x-ai/grok-4-fast": "open_router",
|
||||
"x-ai/grok-4.1-fast": "open_router",
|
||||
"x-ai/grok-code-fast-1": "open_router"
|
||||
},
|
||||
"discriminator_values": [
|
||||
"gpt-4o-mini"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"replicate_api_key_credentials",
|
||||
"openai_api_key_credentials"
|
||||
],
|
||||
"title": "FluxAIImageGeneratorCredentialsInputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,505 @@
|
||||
{
|
||||
"id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
|
||||
"version": 12,
|
||||
"is_active": true,
|
||||
"name": "AI Webpage Copy Improver",
|
||||
"description": "Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates. The AI examines your existing content, identifies areas for improvement, and generates refined copy that maintains your brand voice while boosting engagement. From homepage headlines to product descriptions, transform your web presence with AI-driven insights. Improve readability, incorporate targeted keywords, and craft compelling calls-to-action - all with the click of a button. Take your digital marketing to the next level with the AI Webpage Copy Improver.",
|
||||
"instructions": null,
|
||||
"recommended_schedule_cron": null,
|
||||
"nodes": [
|
||||
{
|
||||
"id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
|
||||
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
"input_default": {
|
||||
"name": "Improved Webpage Copy"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1039.5884372540172,
|
||||
"y": -0.8359099621230968
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "d4334477-3616-454f-a430-614ca27f5b36",
|
||||
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [],
|
||||
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
|
||||
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
"input_default": {
|
||||
"name": "Original Page Analysis",
|
||||
"description": "Analysis of the webpage as it currently stands."
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1037.7724103954706,
|
||||
"y": -606.5934325506903
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
|
||||
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [],
|
||||
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
|
||||
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
|
||||
"input_default": {
|
||||
"name": "Homepage URL",
|
||||
"value": "https://agpt.co",
|
||||
"description": "Enter the URL of the homepage you want to improve"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -1195.1455674454749,
|
||||
"y": 0
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
|
||||
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
|
||||
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"source_name": "result",
|
||||
"sink_name": "url",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"block_id": "436c3984-57fd-4b85-8e9a-459b356883bd",
|
||||
"input_default": {
|
||||
"raw_content": false
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -631.7330786555249,
|
||||
"y": 1.9638396496230826
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
|
||||
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
|
||||
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"source_name": "result",
|
||||
"sink_name": "url",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
|
||||
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"source_name": "content",
|
||||
"sink_name": "prompt_values_#_CONTENT",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
|
||||
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"source_name": "content",
|
||||
"sink_name": "prompt_values_#_CONTENT",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
"input_default": {
|
||||
"model": "gpt-4o",
|
||||
"prompt": "Current Webpage Content:\n```\n{{CONTENT}}\n```\n\nBased on the following analysis of the webpage content:\n\n```\n{{ANALYSIS}}\n```\n\nRewrite and improve the content to address the identified issues. Focus on:\n1. Enhancing clarity and readability\n2. Optimizing for SEO (suggest and incorporate relevant keywords)\n3. Improving calls-to-action for better conversion rates\n4. Refining the structure and organization\n5. Maintaining brand consistency while improving the overall tone\n\nProvide the improved content in HTML format inside a code-block with \"```\" backticks, preserving the original structure where appropriate. Also, include a brief summary of the changes made and their potential impact.",
|
||||
"prompt_values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 488.37278423303917,
|
||||
"y": 0
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
|
||||
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"source_name": "content",
|
||||
"sink_name": "prompt_values_#_CONTENT",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
|
||||
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt_values_#_ANALYSIS",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "d4334477-3616-454f-a430-614ca27f5b36",
|
||||
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
"input_default": {
|
||||
"model": "gpt-4o",
|
||||
"prompt": "Analyze the following webpage content and provide a detailed report on its current state, including strengths and weaknesses in terms of clarity, SEO optimization, and potential for conversion:\n\n{{CONTENT}}\n\nInclude observations on:\n1. Overall readability and clarity\n2. Use of keywords and SEO-friendly language\n3. Effectiveness of calls-to-action\n4. Structure and organization of content\n5. Tone and brand consistency",
|
||||
"prompt_values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": -72.66206703605442,
|
||||
"y": -0.58403945075381
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
|
||||
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"source_name": "content",
|
||||
"sink_name": "prompt_values_#_CONTENT",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
|
||||
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
|
||||
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt_values_#_ANALYSIS",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "0d440799-44ba-4d6c-85b3-b3739f1e1287",
|
||||
"graph_version": 12,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
{
|
||||
"id": "adfa6113-77b3-4e32-b136-3e694b87553e",
|
||||
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"source_name": "content",
|
||||
"sink_name": "prompt_values_#_CONTENT",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "d4334477-3616-454f-a430-614ca27f5b36",
|
||||
"source_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"sink_id": "130ec496-f75d-4fe2-9cd6-8c00d08ea4a7",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "5d5656fd-4208-4296-bc70-e39cc31caada",
|
||||
"source_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"sink_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"source_name": "content",
|
||||
"sink_name": "prompt_values_#_CONTENT",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "f979ab78-0903-4f19-a7c2-a419d5d81aef",
|
||||
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"sink_id": "cefccd07-fe70-4feb-bf76-46b20aaa5d35",
|
||||
"source_name": "response",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "6bcca45d-c9d5-439e-ac43-e4a1264d8f57",
|
||||
"source_id": "08612ce2-625b-4c17-accd-3acace7b6477",
|
||||
"sink_id": "c9924577-70d8-4ccb-9106-6f796df09ef9",
|
||||
"source_name": "response",
|
||||
"sink_name": "prompt_values_#_ANALYSIS",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "cbb12335-fefd-4560-9fff-98675130fbad",
|
||||
"source_id": "375f8bc3-afd9-4025-ad8e-9aeb329af7ce",
|
||||
"sink_id": "b40595c6-dba3-4779-a129-cd4f01fff103",
|
||||
"source_name": "result",
|
||||
"sink_name": "url",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"sub_graphs": [],
|
||||
"user_id": "",
|
||||
"created_at": "2024-12-20T19:47:22.036Z",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Homepage URL": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Homepage URL",
|
||||
"description": "Enter the URL of the homepage you want to improve",
|
||||
"default": "https://agpt.co"
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
},
|
||||
"output_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Improved Webpage Copy": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Improved Webpage Copy"
|
||||
},
|
||||
"Original Page Analysis": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Original Page Analysis",
|
||||
"description": "Analysis of the webpage as it currently stands."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"Improved Webpage Copy",
|
||||
"Original Page Analysis"
|
||||
]
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"trigger_setup_info": null,
|
||||
"credentials_input_schema": {
|
||||
"properties": {
|
||||
"jina_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"jina"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "jina",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator_values": []
|
||||
},
|
||||
"openai_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"openai"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "openai",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.OPENAI: 'openai'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator": "model",
|
||||
"discriminator_mapping": {
|
||||
"Llama-3.3-70B-Instruct": "llama_api",
|
||||
"Llama-3.3-8B-Instruct": "llama_api",
|
||||
"Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
|
||||
"Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
|
||||
"Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
|
||||
"amazon/nova-lite-v1": "open_router",
|
||||
"amazon/nova-micro-v1": "open_router",
|
||||
"amazon/nova-pro-v1": "open_router",
|
||||
"claude-3-7-sonnet-20250219": "anthropic",
|
||||
"claude-3-haiku-20240307": "anthropic",
|
||||
"claude-haiku-4-5-20251001": "anthropic",
|
||||
"claude-opus-4-1-20250805": "anthropic",
|
||||
"claude-opus-4-20250514": "anthropic",
|
||||
"claude-opus-4-5-20251101": "anthropic",
|
||||
"claude-sonnet-4-20250514": "anthropic",
|
||||
"claude-sonnet-4-5-20250929": "anthropic",
|
||||
"cohere/command-r-08-2024": "open_router",
|
||||
"cohere/command-r-plus-08-2024": "open_router",
|
||||
"deepseek/deepseek-chat": "open_router",
|
||||
"deepseek/deepseek-r1-0528": "open_router",
|
||||
"dolphin-mistral:latest": "ollama",
|
||||
"google/gemini-2.0-flash-001": "open_router",
|
||||
"google/gemini-2.0-flash-lite-001": "open_router",
|
||||
"google/gemini-2.5-flash": "open_router",
|
||||
"google/gemini-2.5-flash-lite-preview-06-17": "open_router",
|
||||
"google/gemini-2.5-pro-preview-03-25": "open_router",
|
||||
"google/gemini-3-pro-preview": "open_router",
|
||||
"gpt-3.5-turbo": "openai",
|
||||
"gpt-4-turbo": "openai",
|
||||
"gpt-4.1-2025-04-14": "openai",
|
||||
"gpt-4.1-mini-2025-04-14": "openai",
|
||||
"gpt-4o": "openai",
|
||||
"gpt-4o-mini": "openai",
|
||||
"gpt-5-2025-08-07": "openai",
|
||||
"gpt-5-chat-latest": "openai",
|
||||
"gpt-5-mini-2025-08-07": "openai",
|
||||
"gpt-5-nano-2025-08-07": "openai",
|
||||
"gpt-5.1-2025-11-13": "openai",
|
||||
"gryphe/mythomax-l2-13b": "open_router",
|
||||
"llama-3.1-8b-instant": "groq",
|
||||
"llama-3.3-70b-versatile": "groq",
|
||||
"llama3": "ollama",
|
||||
"llama3.1:405b": "ollama",
|
||||
"llama3.2": "ollama",
|
||||
"llama3.3": "ollama",
|
||||
"meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
|
||||
"meta-llama/llama-4-maverick": "open_router",
|
||||
"meta-llama/llama-4-scout": "open_router",
|
||||
"microsoft/wizardlm-2-8x22b": "open_router",
|
||||
"mistralai/mistral-nemo": "open_router",
|
||||
"moonshotai/kimi-k2": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-405b": "open_router",
|
||||
"nousresearch/hermes-3-llama-3.1-70b": "open_router",
|
||||
"nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
|
||||
"o1": "openai",
|
||||
"o1-mini": "openai",
|
||||
"o3-2025-04-16": "openai",
|
||||
"o3-mini": "openai",
|
||||
"openai/gpt-oss-120b": "open_router",
|
||||
"openai/gpt-oss-20b": "open_router",
|
||||
"perplexity/sonar": "open_router",
|
||||
"perplexity/sonar-deep-research": "open_router",
|
||||
"perplexity/sonar-pro": "open_router",
|
||||
"qwen/qwen3-235b-a22b-thinking-2507": "open_router",
|
||||
"qwen/qwen3-coder": "open_router",
|
||||
"v0-1.0-md": "v0",
|
||||
"v0-1.5-lg": "v0",
|
||||
"v0-1.5-md": "v0",
|
||||
"x-ai/grok-4": "open_router",
|
||||
"x-ai/grok-4-fast": "open_router",
|
||||
"x-ai/grok-4.1-fast": "open_router",
|
||||
"x-ai/grok-code-fast-1": "open_router"
|
||||
},
|
||||
"discriminator_values": [
|
||||
"gpt-4o"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"jina_api_key_credentials",
|
||||
"openai_api_key_credentials"
|
||||
],
|
||||
"title": "AIWebpageCopyImproverCredentialsInputSchema",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,615 @@
|
||||
{
|
||||
"id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"version": 29,
|
||||
"is_active": true,
|
||||
"name": "Email Address Finder",
|
||||
"description": "Input information of a business and find their email address",
|
||||
"instructions": null,
|
||||
"recommended_schedule_cron": null,
|
||||
"nodes": [
|
||||
{
|
||||
"id": "04cad535-9f1a-4876-8b07-af5897d8c282",
|
||||
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
|
||||
"input_default": {
|
||||
"name": "Address",
|
||||
"value": "USA"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1047.9357219838776,
|
||||
"y": 1067.9123910370954
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
|
||||
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
|
||||
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"source_name": "result",
|
||||
"sink_name": "values_#_ADDRESS",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"block_id": "3146e4fe-2cdd-4f29-bd12-0c9d5bb4deb0",
|
||||
"input_default": {
|
||||
"group": 1,
|
||||
"pattern": "<email>(.*?)<\\/email>"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 3381.2821481740634,
|
||||
"y": 246.091098184158
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
|
||||
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"source_name": "response",
|
||||
"sink_name": "text",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
|
||||
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
|
||||
"source_name": "negative",
|
||||
"sink_name": "values_#_Result",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
|
||||
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
|
||||
"source_name": "positive",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
|
||||
"block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
"input_default": {
|
||||
"name": "Email"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 4525.4246310882,
|
||||
"y": 246.36913665010354
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
|
||||
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
|
||||
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
|
||||
"source_name": "output",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
|
||||
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
|
||||
"source_name": "positive",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "4a41df99-ffe2-4c12-b528-632979c9c030",
|
||||
"block_id": "87840993-2053-44b7-8da4-187ad4ee518c",
|
||||
"input_default": {},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 2182.7499999999995,
|
||||
"y": 242.00001144409185
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
|
||||
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
|
||||
"source_name": "output",
|
||||
"sink_name": "query",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
|
||||
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
|
||||
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"source_name": "results",
|
||||
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
|
||||
"block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
|
||||
"input_default": {
|
||||
"name": "Business Name",
|
||||
"value": "Tim Cook"
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1049.9704155272595,
|
||||
"y": 244.49931152418344
|
||||
}
|
||||
},
|
||||
"input_links": [],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
|
||||
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
|
||||
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"source_name": "result",
|
||||
"sink_name": "values_#_NAME",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
|
||||
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
|
||||
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_BUSINESS_NAME",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
|
||||
"input_default": {
|
||||
"format": "Email Address of {{NAME}}, {{ADDRESS}}",
|
||||
"values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 1625.25,
|
||||
"y": 243.25001144409185
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
|
||||
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
|
||||
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"source_name": "result",
|
||||
"sink_name": "values_#_NAME",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
|
||||
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
|
||||
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"source_name": "result",
|
||||
"sink_name": "values_#_ADDRESS",
|
||||
"is_static": true
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
|
||||
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
|
||||
"source_name": "output",
|
||||
"sink_name": "query",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "266b7255-11c4-4b88-99e2-85db31a2e865",
|
||||
"block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
|
||||
"input_default": {
|
||||
"format": "Failed to find email. \nResult:\n{{RESULT}}",
|
||||
"values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 3949.7493830805934,
|
||||
"y": 705.209819698647
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
|
||||
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
|
||||
"source_name": "negative",
|
||||
"sink_name": "values_#_Result",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
|
||||
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
|
||||
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
|
||||
"source_name": "output",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
},
|
||||
{
|
||||
"id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
"input_default": {
|
||||
"model": "claude-sonnet-4-5-20250929",
|
||||
"prompt": "<business_website>\n{{WEBSITE_CONTENT}}\n</business_website>\n\nExtract the Contact Email of {{BUSINESS_NAME}}.\n\nIf no email that can be used to contact {{BUSINESS_NAME}} is present, output `N/A`.\nDo not share any emails other than the email for this specific entity.\n\nIf multiple present pick the likely best one.\n\nRespond with the email (or N/A) inside <email></email> tags.\n\nExample Response:\n\n<thoughts_or_comments>\nThere were many emails present, but luckily one was for {{BUSINESS_NAME}} which I have included below.\n</thoughts_or_comments>\n<email>\nexample@email.com\n</email>",
|
||||
"prompt_values": {}
|
||||
},
|
||||
"metadata": {
|
||||
"position": {
|
||||
"x": 2774.879259081777,
|
||||
"y": 243.3102035752969
|
||||
}
|
||||
},
|
||||
"input_links": [
|
||||
{
|
||||
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
|
||||
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
|
||||
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_BUSINESS_NAME",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
|
||||
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
|
||||
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"source_name": "results",
|
||||
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"output_links": [
|
||||
{
|
||||
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
|
||||
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"source_name": "response",
|
||||
"sink_name": "text",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"graph_id": "4c6b68cb-bb75-4044-b1cb-2cee3fd39b26",
|
||||
"graph_version": 29,
|
||||
"webhook_id": null,
|
||||
"webhook": null
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
{
|
||||
"id": "9f8188ce-1f3d-46fb-acda-b2a57c0e5da6",
|
||||
"source_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"sink_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"source_name": "response",
|
||||
"sink_name": "text",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "b15b5143-27b7-486e-a166-4095e72e5235",
|
||||
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"sink_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
|
||||
"source_name": "negative",
|
||||
"sink_name": "values_#_Result",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "d87b07ea-dcec-4d38-a644-2c1d741ea3cb",
|
||||
"source_id": "266b7255-11c4-4b88-99e2-85db31a2e865",
|
||||
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
|
||||
"source_name": "output",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "946b522c-365f-4ee0-96f9-28863d9882ea",
|
||||
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
|
||||
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"source_name": "result",
|
||||
"sink_name": "values_#_NAME",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "23591872-3c6b-4562-87d3-5b6ade698e48",
|
||||
"source_id": "a6e7355e-5bf8-4b09-b11c-a5e140389981",
|
||||
"sink_id": "310c8fab-2ae6-4158-bd48-01dbdc434130",
|
||||
"source_name": "positive",
|
||||
"sink_name": "value",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "43e920a7-0bb4-4fae-9a22-91df95c7342a",
|
||||
"source_id": "9708a10a-8be0-4c44-abb3-bd0f7c594794",
|
||||
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"source_name": "result",
|
||||
"sink_name": "prompt_values_#_BUSINESS_NAME",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "2e411d3d-79ba-4958-9c1c-b76a45a2e649",
|
||||
"source_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"sink_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
|
||||
"source_name": "output",
|
||||
"sink_name": "query",
|
||||
"is_static": false
|
||||
},
|
||||
{
|
||||
"id": "aac29f7b-3cd1-4c91-9a2a-72a8301c0957",
|
||||
"source_id": "04cad535-9f1a-4876-8b07-af5897d8c282",
|
||||
"sink_id": "28b5ddcc-dc20-41cc-ad21-c54ff459f694",
|
||||
"source_name": "result",
|
||||
"sink_name": "values_#_ADDRESS",
|
||||
"is_static": true
|
||||
},
|
||||
{
|
||||
"id": "899cc7d8-a96b-4107-b3c6-4c78edcf0c6b",
|
||||
"source_id": "4a41df99-ffe2-4c12-b528-632979c9c030",
|
||||
"sink_id": "510937b3-0134-4e45-b2ba-05a447bbaf50",
|
||||
"source_name": "results",
|
||||
"sink_name": "prompt_values_#_WEBSITE_CONTENT",
|
||||
"is_static": false
|
||||
}
|
||||
],
|
||||
"forked_from_id": null,
|
||||
"forked_from_version": null,
|
||||
"sub_graphs": [],
|
||||
"user_id": "",
|
||||
"created_at": "2025-01-03T00:46:30.244Z",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Address": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Address",
|
||||
"default": "USA"
|
||||
},
|
||||
"Business Name": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Business Name",
|
||||
"default": "Tim Cook"
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
},
|
||||
"output_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"Email": {
|
||||
"advanced": false,
|
||||
"secret": false,
|
||||
"title": "Email"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"Email"
|
||||
]
|
||||
},
|
||||
"has_external_trigger": false,
|
||||
"has_human_in_the_loop": false,
|
||||
"trigger_setup_info": null,
|
||||
"credentials_input_schema": {
|
||||
"properties": {
|
||||
"jina_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"jina"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Title"
|
||||
},
|
||||
"provider": {
|
||||
"const": "jina",
|
||||
"title": "Provider",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"const": "api_key",
|
||||
"title": "Type",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"provider",
|
||||
"type"
|
||||
],
|
||||
"title": "CredentialsMetaInput[Literal[<ProviderName.JINA: 'jina'>], Literal['api_key']]",
|
||||
"type": "object",
|
||||
"discriminator_values": []
|
||||
},
|
||||
"anthropic_api_key_credentials": {
|
||||
"credentials_provider": [
|
||||
"anthropic"
|
||||
],
|
||||
"credentials_types": [
|
||||
"api_key"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"title": "Id",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
              {
                "type": "null"
              }
            ],
            "default": null,
            "title": "Title"
          },
          "provider": {
            "const": "anthropic",
            "title": "Provider",
            "type": "string"
          },
          "type": {
            "const": "api_key",
            "title": "Type",
            "type": "string"
          }
        },
        "required": [
          "id",
          "provider",
          "type"
        ],
        "title": "CredentialsMetaInput[Literal[<ProviderName.ANTHROPIC: 'anthropic'>], Literal['api_key']]",
        "type": "object",
        "discriminator": "model",
        "discriminator_mapping": {
          "Llama-3.3-70B-Instruct": "llama_api",
          "Llama-3.3-8B-Instruct": "llama_api",
          "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama_api",
          "Llama-4-Scout-17B-16E-Instruct-FP8": "llama_api",
          "Qwen/Qwen2.5-72B-Instruct-Turbo": "aiml_api",
          "amazon/nova-lite-v1": "open_router",
          "amazon/nova-micro-v1": "open_router",
          "amazon/nova-pro-v1": "open_router",
          "claude-3-7-sonnet-20250219": "anthropic",
          "claude-3-haiku-20240307": "anthropic",
          "claude-haiku-4-5-20251001": "anthropic",
          "claude-opus-4-1-20250805": "anthropic",
          "claude-opus-4-20250514": "anthropic",
          "claude-opus-4-5-20251101": "anthropic",
          "claude-sonnet-4-20250514": "anthropic",
          "claude-sonnet-4-5-20250929": "anthropic",
          "cohere/command-r-08-2024": "open_router",
          "cohere/command-r-plus-08-2024": "open_router",
          "deepseek/deepseek-chat": "open_router",
          "deepseek/deepseek-r1-0528": "open_router",
          "dolphin-mistral:latest": "ollama",
          "google/gemini-2.0-flash-001": "open_router",
          "google/gemini-2.0-flash-lite-001": "open_router",
          "google/gemini-2.5-flash": "open_router",
          "google/gemini-2.5-flash-lite-preview-06-17": "open_router",
          "google/gemini-2.5-pro-preview-03-25": "open_router",
          "google/gemini-3-pro-preview": "open_router",
          "gpt-3.5-turbo": "openai",
          "gpt-4-turbo": "openai",
          "gpt-4.1-2025-04-14": "openai",
          "gpt-4.1-mini-2025-04-14": "openai",
          "gpt-4o": "openai",
          "gpt-4o-mini": "openai",
          "gpt-5-2025-08-07": "openai",
          "gpt-5-chat-latest": "openai",
          "gpt-5-mini-2025-08-07": "openai",
          "gpt-5-nano-2025-08-07": "openai",
          "gpt-5.1-2025-11-13": "openai",
          "gryphe/mythomax-l2-13b": "open_router",
          "llama-3.1-8b-instant": "groq",
          "llama-3.3-70b-versatile": "groq",
          "llama3": "ollama",
          "llama3.1:405b": "ollama",
          "llama3.2": "ollama",
          "llama3.3": "ollama",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo": "aiml_api",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "aiml_api",
          "meta-llama/llama-4-maverick": "open_router",
          "meta-llama/llama-4-scout": "open_router",
          "microsoft/wizardlm-2-8x22b": "open_router",
          "mistralai/mistral-nemo": "open_router",
          "moonshotai/kimi-k2": "open_router",
          "nousresearch/hermes-3-llama-3.1-405b": "open_router",
          "nousresearch/hermes-3-llama-3.1-70b": "open_router",
          "nvidia/llama-3.1-nemotron-70b-instruct": "aiml_api",
          "o1": "openai",
          "o1-mini": "openai",
          "o3-2025-04-16": "openai",
          "o3-mini": "openai",
          "openai/gpt-oss-120b": "open_router",
          "openai/gpt-oss-20b": "open_router",
          "perplexity/sonar": "open_router",
          "perplexity/sonar-deep-research": "open_router",
          "perplexity/sonar-pro": "open_router",
          "qwen/qwen3-235b-a22b-thinking-2507": "open_router",
          "qwen/qwen3-coder": "open_router",
          "v0-1.0-md": "v0",
          "v0-1.5-lg": "v0",
          "v0-1.5-md": "v0",
          "x-ai/grok-4": "open_router",
          "x-ai/grok-4-fast": "open_router",
          "x-ai/grok-4.1-fast": "open_router",
          "x-ai/grok-code-fast-1": "open_router"
        },
        "discriminator_values": [
          "claude-sonnet-4-5-20250929"
        ]
      }
    },
    "required": [
      "jina_api_key_credentials",
      "anthropic_api_key_credentials"
    ],
    "title": "EmailAddressFinderCredentialsInputSchema",
    "type": "object"
  }
}
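The discriminator mapping above ties every selectable model name to the credentials provider whose key the block must request. A minimal sketch of the lookup this implies; the trimmed dict and helper below are illustrative assumptions, not the platform's actual resolution code:

# Hypothetical helper: resolve which credentials provider a model requires,
# based on a discriminator mapping like the one in the schema above.
DISCRIMINATOR_MAPPING = {
    "claude-sonnet-4-5-20250929": "anthropic",
    "gpt-4o": "openai",
    "llama-3.3-70b-versatile": "groq",
    "x-ai/grok-4": "open_router",
}


def provider_for_model(model: str) -> str:
    """Resolve which credentials provider a given model name requires."""
    try:
        return DISCRIMINATOR_MAPPING[model]
    except KeyError:
        raise ValueError(f"No credentials provider registered for model {model!r}")


assert provider_for_model("claude-sonnet-4-5-20250929") == "anthropic"
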
@@ -1371,7 +1371,7 @@ async def create_base(
    if tables:
        params["tables"] = tables

    print(params)
    logger.debug(f"Creating Airtable base with params: {params}")

    response = await Requests().post(
        "https://api.airtable.com/v0/meta/bases",
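The hunk above swaps a stray print for structured debug logging. A minimal sketch of the module-level logger this assumes (standard-library logging; nothing Airtable-specific):

import logging

# Module-level logger, as assumed by the logger.debug(...) call above.
logger = logging.getLogger(__name__)
logger.debug("Creating Airtable base with params: %s", {"name": "Example base"})
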
@@ -0,0 +1,108 @@
{
  "action": "created",
  "discussion": {
    "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "category": {
      "id": 12345678,
      "node_id": "DIC_kwDOJKSTjM4CXXXX",
      "repository_id": 614765452,
      "emoji": ":pray:",
      "name": "Q&A",
      "description": "Ask the community for help",
      "created_at": "2023-03-16T09:21:07Z",
      "updated_at": "2023-03-16T09:21:07Z",
      "slug": "q-a",
      "is_answerable": true
    },
    "answer_html_url": null,
    "answer_chosen_at": null,
    "answer_chosen_by": null,
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT/discussions/9999",
    "id": 5000000001,
    "node_id": "D_kwDOJKSTjM4AYYYY",
    "number": 9999,
    "title": "How do I configure custom blocks?",
    "user": {
      "login": "curious-user",
      "id": 22222222,
      "node_id": "MDQ6VXNlcjIyMjIyMjIy",
      "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4",
      "url": "https://api.github.com/users/curious-user",
      "html_url": "https://github.com/curious-user",
      "type": "User",
      "site_admin": false
    },
    "state": "open",
    "state_reason": null,
    "locked": false,
    "comments": 0,
    "created_at": "2024-12-01T17:00:00Z",
    "updated_at": "2024-12-01T17:00:00Z",
    "author_association": "NONE",
    "active_lock_reason": null,
    "body": "## Question\n\nI'm trying to create a custom block for my specific use case. I've read the documentation but I'm not sure how to:\n\n1. Define the input/output schema\n2. Handle authentication\n3. Test my block locally\n\nCan someone point me to examples or provide guidance?\n\n## Environment\n\n- AutoGPT Platform version: latest\n- Python: 3.11",
    "reactions": {
      "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/reactions",
      "total_count": 0,
      "+1": 0,
      "-1": 0,
      "laugh": 0,
      "hooray": 0,
      "confused": 0,
      "heart": 0,
      "rocket": 0,
      "eyes": 0
    },
    "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/timeline"
  },
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T17:00:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170000,
    "watchers_count": 170000,
    "language": "Python",
    "has_discussions": true,
    "forks_count": 45000,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "curious-user",
    "id": 22222222,
    "node_id": "MDQ6VXNlcjIyMjIyMjIy",
    "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/curious-user",
    "html_url": "https://github.com/curious-user",
    "type": "User",
    "site_admin": false
  }
}
@@ -0,0 +1,112 @@
{
  "action": "opened",
  "issue": {
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345",
    "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/labels{/name}",
    "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/comments",
    "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/events",
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT/issues/12345",
    "id": 2000000001,
    "node_id": "I_kwDOJKSTjM5wXXXX",
    "number": 12345,
    "title": "Bug: Application crashes when processing large files",
    "user": {
      "login": "bug-reporter",
      "id": 11111111,
      "node_id": "MDQ6VXNlcjExMTExMTEx",
      "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4",
      "url": "https://api.github.com/users/bug-reporter",
      "html_url": "https://github.com/bug-reporter",
      "type": "User",
      "site_admin": false
    },
    "labels": [
      {
        "id": 5272676214,
        "node_id": "LA_kwDOJKSTjM8AAAABOkandg",
        "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/bug",
        "name": "bug",
        "color": "d73a4a",
        "default": true,
        "description": "Something isn't working"
      }
    ],
    "state": "open",
    "locked": false,
    "assignee": null,
    "assignees": [],
    "milestone": null,
    "comments": 0,
    "created_at": "2024-12-01T16:00:00Z",
    "updated_at": "2024-12-01T16:00:00Z",
    "closed_at": null,
    "author_association": "NONE",
    "active_lock_reason": null,
    "body": "## Description\n\nWhen I try to process a file larger than 100MB, the application crashes with an out of memory error.\n\n## Steps to Reproduce\n\n1. Open the application\n2. Select a file larger than 100MB\n3. Click 'Process'\n4. Application crashes\n\n## Expected Behavior\n\nThe application should handle large files gracefully.\n\n## Environment\n\n- OS: Ubuntu 22.04\n- Python: 3.11\n- AutoGPT Version: 1.0.0",
    "reactions": {
      "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/reactions",
      "total_count": 0,
      "+1": 0,
      "-1": 0,
      "laugh": 0,
      "hooray": 0,
      "confused": 0,
      "heart": 0,
      "rocket": 0,
      "eyes": 0
    },
    "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/timeline",
    "state_reason": null
  },
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T16:00:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170000,
    "watchers_count": 170000,
    "language": "Python",
    "forks_count": 45000,
    "open_issues_count": 190,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "bug-reporter",
    "id": 11111111,
    "node_id": "MDQ6VXNlcjExMTExMTEx",
    "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/bug-reporter",
    "html_url": "https://github.com/bug-reporter",
    "type": "User",
    "site_admin": false
  }
}
@@ -0,0 +1,97 @@
{
  "action": "published",
  "release": {
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789",
    "assets_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets",
    "upload_url": "https://uploads.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets{?name,label}",
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/tag/v1.0.0",
    "id": 123456789,
    "author": {
      "login": "ntindle",
      "id": 12345678,
      "node_id": "MDQ6VXNlcjEyMzQ1Njc4",
      "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4",
      "gravatar_id": "",
      "url": "https://api.github.com/users/ntindle",
      "html_url": "https://github.com/ntindle",
      "type": "User",
      "site_admin": false
    },
    "node_id": "RE_kwDOJKSTjM4HWwAA",
    "tag_name": "v1.0.0",
    "target_commitish": "master",
    "name": "AutoGPT Platform v1.0.0",
    "draft": false,
    "prerelease": false,
    "created_at": "2024-12-01T10:00:00Z",
    "published_at": "2024-12-01T12:00:00Z",
    "assets": [
      {
        "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/assets/987654321",
        "id": 987654321,
        "node_id": "RA_kwDOJKSTjM4HWwBB",
        "name": "autogpt-v1.0.0.zip",
        "label": "Release Package",
        "content_type": "application/zip",
        "state": "uploaded",
        "size": 52428800,
        "download_count": 0,
        "created_at": "2024-12-01T11:30:00Z",
        "updated_at": "2024-12-01T11:35:00Z",
        "browser_download_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/download/v1.0.0/autogpt-v1.0.0.zip"
      }
    ],
    "tarball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tarball/v1.0.0",
    "zipball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/zipball/v1.0.0",
    "body": "## What's New\n\n- Feature 1: Amazing new capability\n- Feature 2: Performance improvements\n- Bug fixes and stability improvements\n\n## Breaking Changes\n\nNone\n\n## Contributors\n\nThanks to all our contributors!"
  },
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T12:00:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170000,
    "watchers_count": 170000,
    "language": "Python",
    "forks_count": 45000,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "ntindle",
    "id": 12345678,
    "node_id": "MDQ6VXNlcjEyMzQ1Njc4",
    "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/ntindle",
    "html_url": "https://github.com/ntindle",
    "type": "User",
    "site_admin": false
  }
}
@@ -0,0 +1,53 @@
{
  "action": "created",
  "starred_at": "2024-12-01T15:30:00Z",
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {
      "login": "Significant-Gravitas",
      "id": 130738209,
      "node_id": "O_kgDOB8roIQ",
      "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
      "url": "https://api.github.com/users/Significant-Gravitas",
      "html_url": "https://github.com/Significant-Gravitas",
      "type": "Organization",
      "site_admin": false
    },
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-12-01T15:30:00Z",
    "pushed_at": "2024-12-01T12:00:00Z",
    "stargazers_count": 170001,
    "watchers_count": 170001,
    "language": "Python",
    "forks_count": 45000,
    "visibility": "public",
    "default_branch": "master"
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "sender": {
    "login": "awesome-contributor",
    "id": 98765432,
    "node_id": "MDQ6VXNlcjk4NzY1NDMy",
    "avatar_url": "https://avatars.githubusercontent.com/u/98765432?v=4",
    "gravatar_id": "",
    "url": "https://api.github.com/users/awesome-contributor",
    "html_url": "https://github.com/awesome-contributor",
    "type": "User",
    "site_admin": false
  }
}
@@ -159,3 +159,391 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):


# --8<-- [end:GithubTriggerExample]


class GithubStarTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub star events - useful for milestone celebrations."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "star.created.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#star
            """

            created: bool = False
            deleted: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The star events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The star event that triggered the webhook ('created' or 'deleted')"
        )
        starred_at: str = SchemaField(
            description="ISO timestamp when the repo was starred (empty if deleted)"
        )
        stargazers_count: int = SchemaField(
            description="Current number of stars on the repository"
        )
        repository_name: str = SchemaField(
            description="Full name of the repository (owner/repo)"
        )
        repository_url: str = SchemaField(description="URL to the repository")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="551e0a35-100b-49b7-89b8-3031322239b6",
            description="This block triggers on GitHub star events. "
            "Useful for celebrating milestones (e.g., 1k, 10k stars) or tracking engagement.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubStarTriggerBlock.Input,
            output_schema=GithubStarTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="star.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"created": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("starred_at", example_payload.get("starred_at", "")),
                ("stargazers_count", example_payload["repository"]["stargazers_count"]),
                ("repository_name", example_payload["repository"]["full_name"]),
                ("repository_url", example_payload["repository"]["html_url"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        yield "event", input_data.payload["action"]
        yield "starred_at", input_data.payload.get("starred_at", "")
        yield "stargazers_count", input_data.payload["repository"]["stargazers_count"]
        yield "repository_name", input_data.payload["repository"]["full_name"]
        yield "repository_url", input_data.payload["repository"]["html_url"]


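# --- Illustrative sketch (not part of the diff) -------------------------------
# How a downstream graph might consume GithubStarTriggerBlock's outputs to post
# a celebration only at round-number milestones. The helper and the `step`
# parameter are hypothetical; the `stargazers_count` output is from the block
# above.
def is_star_milestone(stargazers_count: int, step: int = 1000) -> bool:
    """True when the star count lands exactly on a milestone boundary."""
    return stargazers_count > 0 and stargazers_count % step == 0


# e.g. with the star.created.json example payload above:
# is_star_milestone(170000) -> True, is_star_milestone(170001) -> False

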
class GithubReleaseTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub release events - ideal for announcing new versions."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "release.published.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#release
            """

            published: bool = False
            unpublished: bool = False
            created: bool = False
            edited: bool = False
            deleted: bool = False
            prereleased: bool = False
            released: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The release events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The release event that triggered the webhook (e.g., 'published')"
        )
        release: dict = SchemaField(description="The full release object")
        release_url: str = SchemaField(description="URL to the release page")
        tag_name: str = SchemaField(description="The release tag name (e.g., 'v1.0.0')")
        release_name: str = SchemaField(description="Human-readable release name")
        body: str = SchemaField(description="Release notes/description")
        prerelease: bool = SchemaField(description="Whether this is a prerelease")
        draft: bool = SchemaField(description="Whether this is a draft release")
        assets: list = SchemaField(description="List of release assets/files")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="2052dd1b-74e1-46ac-9c87-c7a0e057b60b",
            description="This block triggers on GitHub release events. "
            "Perfect for automating announcements to Discord, Twitter, or other platforms.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubReleaseTriggerBlock.Input,
            output_schema=GithubReleaseTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="release.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"published": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("release", example_payload["release"]),
                ("release_url", example_payload["release"]["html_url"]),
                ("tag_name", example_payload["release"]["tag_name"]),
                ("release_name", example_payload["release"]["name"]),
                ("body", example_payload["release"]["body"]),
                ("prerelease", example_payload["release"]["prerelease"]),
                ("draft", example_payload["release"]["draft"]),
                ("assets", example_payload["release"]["assets"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        release = input_data.payload["release"]
        yield "event", input_data.payload["action"]
        yield "release", release
        yield "release_url", release["html_url"]
        yield "tag_name", release["tag_name"]
        yield "release_name", release.get("name", "")
        yield "body", release.get("body", "")
        yield "prerelease", release["prerelease"]
        yield "draft", release["draft"]
        yield "assets", release["assets"]


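# --- Illustrative sketch (not part of the diff) -------------------------------
# Formatting GithubReleaseTriggerBlock's outputs into an announcement string for
# a downstream posting block. The field names match the Output schema above; the
# message template itself is an assumption.
def format_release_announcement(
    release_name: str, tag_name: str, release_url: str, prerelease: bool
) -> str:
    kind = "Pre-release" if prerelease else "Release"
    return f"{kind} {release_name} ({tag_name}) is out: {release_url}"

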
class GithubIssuesTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub issues events - great for triage and notifications."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "issues.opened.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#issues
            """

            opened: bool = False
            edited: bool = False
            deleted: bool = False
            closed: bool = False
            reopened: bool = False
            assigned: bool = False
            unassigned: bool = False
            labeled: bool = False
            unlabeled: bool = False
            locked: bool = False
            unlocked: bool = False
            transferred: bool = False
            milestoned: bool = False
            demilestoned: bool = False
            pinned: bool = False
            unpinned: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The issue events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The issue event that triggered the webhook (e.g., 'opened')"
        )
        number: int = SchemaField(description="The issue number")
        issue: dict = SchemaField(description="The full issue object")
        issue_url: str = SchemaField(description="URL to the issue")
        issue_title: str = SchemaField(description="The issue title")
        issue_body: str = SchemaField(description="The issue body/description")
        labels: list = SchemaField(description="List of labels on the issue")
        assignees: list = SchemaField(description="List of assignees")
        state: str = SchemaField(description="Issue state ('open' or 'closed')")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="b2605464-e486-4bf4-aad3-d8a213c8a48a",
            description="This block triggers on GitHub issues events. "
            "Useful for automated triage, notifications, and welcoming first-time contributors.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubIssuesTriggerBlock.Input,
            output_schema=GithubIssuesTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="issues.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"opened": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("number", example_payload["issue"]["number"]),
                ("issue", example_payload["issue"]),
                ("issue_url", example_payload["issue"]["html_url"]),
                ("issue_title", example_payload["issue"]["title"]),
                ("issue_body", example_payload["issue"]["body"]),
                ("labels", example_payload["issue"]["labels"]),
                ("assignees", example_payload["issue"]["assignees"]),
                ("state", example_payload["issue"]["state"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        issue = input_data.payload["issue"]
        yield "event", input_data.payload["action"]
        yield "number", issue["number"]
        yield "issue", issue
        yield "issue_url", issue["html_url"]
        yield "issue_title", issue["title"]
        yield "issue_body", issue.get("body") or ""
        yield "labels", issue["labels"]
        yield "assignees", issue["assignees"]
        yield "state", issue["state"]


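# --- Illustrative sketch (not part of the diff) -------------------------------
# Simple label-based routing over GithubIssuesTriggerBlock's outputs. The
# routing table is hypothetical; `labels` is the block's output (a list of
# label dicts, as in issues.opened.json above).
def triage_channel(labels: list[dict]) -> str:
    names = {label.get("name") for label in labels}
    if "bug" in names:
        return "bug-triage"
    if "enhancement" in names:
        return "feature-requests"
    return "general-triage"

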
class GithubDiscussionTriggerBlock(GitHubTriggerBase, Block):
    """Trigger block for GitHub discussion events - perfect for community Q&A sync."""

    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "discussion.created.json"
    )

    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#discussion
            """

            created: bool = False
            edited: bool = False
            deleted: bool = False
            answered: bool = False
            unanswered: bool = False
            labeled: bool = False
            unlabeled: bool = False
            locked: bool = False
            unlocked: bool = False
            category_changed: bool = False
            transferred: bool = False
            pinned: bool = False
            unpinned: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The discussion events to subscribe to"
        )

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The discussion event that triggered the webhook"
        )
        number: int = SchemaField(description="The discussion number")
        discussion: dict = SchemaField(description="The full discussion object")
        discussion_url: str = SchemaField(description="URL to the discussion")
        title: str = SchemaField(description="The discussion title")
        body: str = SchemaField(description="The discussion body")
        category: dict = SchemaField(description="The discussion category object")
        category_name: str = SchemaField(description="Name of the category")
        state: str = SchemaField(description="Discussion state")

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(
            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
        )

        super().__init__(
            id="87f847b3-d81a-424e-8e89-acadb5c9d52b",
            description="This block triggers on GitHub Discussions events. "
            "Great for syncing Q&A to Discord or auto-responding to common questions. "
            "Note: Discussions must be enabled on the repository.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubDiscussionTriggerBlock.Input,
            output_schema=GithubDiscussionTriggerBlock.Output,
            webhook_config=BlockWebhookConfig(
                provider=ProviderName.GITHUB,
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="discussion.{event}",
            ),
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"created": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("number", example_payload["discussion"]["number"]),
                ("discussion", example_payload["discussion"]),
                ("discussion_url", example_payload["discussion"]["html_url"]),
                ("title", example_payload["discussion"]["title"]),
                ("body", example_payload["discussion"]["body"]),
                ("category", example_payload["discussion"]["category"]),
                ("category_name", example_payload["discussion"]["category"]["name"]),
                ("state", example_payload["discussion"]["state"]),
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        async for name, value in super().run(input_data, **kwargs):
            yield name, value
        discussion = input_data.payload["discussion"]
        yield "event", input_data.payload["action"]
        yield "number", discussion["number"]
        yield "discussion", discussion
        yield "discussion_url", discussion["html_url"]
        yield "title", discussion["title"]
        yield "body", discussion.get("body") or ""
        yield "category", discussion["category"]
        yield "category_name", discussion["category"]["name"]
        yield "state", discussion["state"]

@@ -1,16 +1,8 @@
import asyncio
import mimetypes
import uuid
from pathlib import Path
from typing import Any, Literal, Optional

from pydantic import BaseModel, ConfigDict, Field

from backend.data.model import SchemaField
from backend.util.file import get_exec_file_path
from backend.util.request import Requests
from backend.util.type import MediaFileType
from backend.util.virus_scanner import scan_content_safe

AttachmentView = Literal[
    "DOCS",
@@ -30,8 +22,8 @@ ATTACHMENT_VIEWS: tuple[AttachmentView, ...] = (
)


class GoogleDriveFile(BaseModel):
    """Represents a single file/folder picked from Google Drive"""
class _GoogleDriveFileBase(BaseModel):
    """Internal base class for Google Drive file representation."""

    model_config = ConfigDict(populate_by_name=True)

@@ -49,144 +41,115 @@ class GoogleDriveFile(BaseModel):
    )


def GoogleDrivePickerField(
    multiselect: bool = False,
    allow_folder_selection: bool = False,
    allowed_views: Optional[list[AttachmentView]] = None,
    allowed_mime_types: Optional[list[str]] = None,
    scopes: Optional[list[str]] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    placeholder: Optional[str] = None,
    **kwargs,
class GoogleDriveFile(_GoogleDriveFileBase):
    """
    Represents a Google Drive file/folder with optional credentials for chaining.

    Used for both inputs and outputs in Google Drive blocks. The `_credentials_id`
    field enables chaining between blocks - when one block outputs a file, the
    next block can use the same credentials to access it.

    When used with GoogleDriveFileField(), the frontend renders a combined
    auth + file picker UI that automatically populates `_credentials_id`.
    """

    # Hidden field for credential ID - populated by frontend, preserved in outputs
    credentials_id: Optional[str] = Field(
        None,
        alias="_credentials_id",
        description="Internal: credential ID for authentication",
    )


def GoogleDriveFileField(
    *,
    title: str,
    description: str | None = None,
    credentials_kwarg: str = "credentials",
    credentials_scopes: list[str] | None = None,
    allowed_views: list[AttachmentView] | None = None,
    allowed_mime_types: list[str] | None = None,
    placeholder: str | None = None,
    **kwargs: Any,
) -> Any:
    """
    Creates a Google Drive Picker input field.
    Creates a Google Drive file input field with auto-generated credentials.

    This field type produces a single UI element that handles both:
    1. Google OAuth authentication
    2. File selection via Google Drive Picker

    The system automatically generates a credentials field, and the credentials
    are passed to the run() method using the specified kwarg name.

    Args:
        multiselect: Allow selecting multiple files/folders (default: False)
        allow_folder_selection: Allow selecting folders (default: False)
        allowed_views: List of view types to show in picker (default: ["DOCS"])
        allowed_mime_types: Filter by MIME types (e.g., ["application/pdf"])
        title: Field title shown in UI
        description: Field description/help text
        credentials_kwarg: Name of the kwarg that will receive GoogleCredentials
            in the run() method (default: "credentials")
        credentials_scopes: OAuth scopes required (default: drive.file)
        allowed_views: List of view types to show in picker (default: ["DOCS"])
        allowed_mime_types: Filter by MIME types
        placeholder: Placeholder text for the button
        **kwargs: Additional SchemaField arguments (advanced, hidden, etc.)
        **kwargs: Additional SchemaField arguments

    Returns:
        Field definition that produces:
        - Single GoogleDriveFile when multiselect=False
        - list[GoogleDriveFile] when multiselect=True
        Field definition that produces GoogleDriveFile

    Example:
        >>> class MyBlock(Block):
        ...     class Input(BlockSchema):
        ...         document: GoogleDriveFile = GoogleDrivePickerField(
        ...             title="Select Document",
        ...             allowed_views=["DOCUMENTS"],
        ...     class Input(BlockSchemaInput):
        ...         spreadsheet: GoogleDriveFile = GoogleDriveFileField(
        ...             title="Select Spreadsheet",
        ...             credentials_kwarg="creds",
        ...             allowed_views=["SPREADSHEETS"],
        ...         )
        ...
        ...         files: list[GoogleDriveFile] = GoogleDrivePickerField(
        ...             title="Select Multiple Files",
        ...             multiselect=True,
        ...             allow_folder_selection=True,
        ...         )
        ...     async def run(
        ...         self, input_data: Input, *, creds: GoogleCredentials, **kwargs
        ...     ):
        ...         # creds is automatically populated
        ...         file = input_data.spreadsheet
    """
    # Build configuration that will be sent to frontend

    # Determine scopes - drive.file is sufficient for picker-selected files
    scopes = credentials_scopes or ["https://www.googleapis.com/auth/drive.file"]

    # Build picker configuration with auto_credentials embedded
    picker_config = {
        "multiselect": multiselect,
        "allow_folder_selection": allow_folder_selection,
        "multiselect": False,
        "allow_folder_selection": False,
        "allowed_views": list(allowed_views) if allowed_views else ["DOCS"],
        "scopes": scopes,
        # Auto-credentials config tells frontend to include _credentials_id in output
        "auto_credentials": {
            "provider": "google",
            "type": "oauth2",
            "scopes": scopes,
            "kwarg_name": credentials_kwarg,
        },
    }

    # Add optional configurations
    if allowed_mime_types:
        picker_config["allowed_mime_types"] = list(allowed_mime_types)

    # Determine required scopes based on config
    base_scopes = scopes if scopes is not None else []
    picker_scopes: set[str] = set(base_scopes)
    if allow_folder_selection:
        picker_scopes.add("https://www.googleapis.com/auth/drive")
    else:
        # Use drive.file for minimal scope - only access files selected by user in picker
        picker_scopes.add("https://www.googleapis.com/auth/drive.file")

    picker_config["scopes"] = sorted(picker_scopes)

    # Set appropriate default value
    default_value = [] if multiselect else None

    # Use SchemaField to handle format properly
    return SchemaField(
        default=default_value,
        default=None,
        title=title,
        description=description,
        placeholder=placeholder or "Choose from Google Drive",
        placeholder=placeholder or "Select from Google Drive",
        # Use google-drive-picker format so frontend renders existing component
        format="google-drive-picker",
        advanced=False,
        json_schema_extra={
            "google_drive_picker_config": picker_config,
            # Also keep auto_credentials at top level for backend detection
            "auto_credentials": {
                "provider": "google",
                "type": "oauth2",
                "scopes": scopes,
                "kwarg_name": credentials_kwarg,
            },
        **kwargs,
        },
    )


DRIVE_API_URL = "https://www.googleapis.com/drive/v3/files"
_requests = Requests(trusted_origins=["https://www.googleapis.com"])


def GoogleDriveAttachmentField(
    *,
    title: str,
    description: str | None = None,
    placeholder: str | None = None,
    multiselect: bool = True,
    allowed_mime_types: list[str] | None = None,
    **extra: Any,
) -> Any:
    return GoogleDrivePickerField(
        multiselect=multiselect,
        allowed_views=list(ATTACHMENT_VIEWS),
        allowed_mime_types=allowed_mime_types,
        title=title,
        description=description,
        placeholder=placeholder or "Choose files from Google Drive",
        **extra,
    )


async def drive_file_to_media_file(
    drive_file: GoogleDriveFile, *, graph_exec_id: str, access_token: str
) -> MediaFileType:
    if drive_file.is_folder:
        raise ValueError("Google Drive selection must be a file.")
    if not access_token:
        raise ValueError("Google Drive access token is required for file download.")

    url = f"{DRIVE_API_URL}/{drive_file.id}?alt=media"
    response = await _requests.get(
        url, headers={"Authorization": f"Bearer {access_token}"}
    )

    mime_type = drive_file.mime_type or response.headers.get(
        "content-type", "application/octet-stream"
    )

    MAX_FILE_SIZE = 100 * 1024 * 1024
    if len(response.content) > MAX_FILE_SIZE:
        raise ValueError(
            f"File too large: {len(response.content)} bytes > {MAX_FILE_SIZE} bytes"
        )

    base_path = Path(get_exec_file_path(graph_exec_id, ""))
    base_path.mkdir(parents=True, exist_ok=True)

    extension = mimetypes.guess_extension(mime_type, strict=False) or ".bin"
    filename = f"{uuid.uuid4()}{extension}"
    target_path = base_path / filename

    await scan_content_safe(response.content, filename=filename)
    await asyncio.to_thread(target_path.write_bytes, response.content)

    return MediaFileType(str(target_path.relative_to(base_path)))

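A minimal sketch of how a block's run() might chain a GoogleDriveFileField input into drive_file_to_media_file. Accessing the token via credentials.access_token.get_secret_value() is an assumption about the GoogleCredentials shape; the rest uses the helpers defined above:

# Illustrative only: chaining a picked Drive file into the download helper.
async def example_run(input_data, *, credentials, graph_exec_id: str):
    media_file = await drive_file_to_media_file(
        input_data.spreadsheet,
        graph_exec_id=graph_exec_id,
        access_token=credentials.access_token.get_secret_value(),
    )
    # Returns a path relative to the execution's file sandbox.
    return media_file
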
File diff suppressed because it is too large
@@ -184,7 +184,13 @@ class SendWebRequestBlock(Block):
        )

        # ─── Execute request ─────────────────────────────────────────
        response = await Requests().request(
        # Use raise_for_status=False so HTTP errors (4xx, 5xx) are returned
        # as response objects instead of raising exceptions, allowing proper
        # handling via client_error and server_error outputs
        response = await Requests(
            raise_for_status=False,
            retry_max_attempts=1,  # allow callers to handle HTTP errors immediately
        ).request(
            input_data.method.value,
            input_data.url,
            headers=input_data.headers,

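With raise_for_status=False, status-code branching can happen in the block itself rather than via exceptions. A minimal sketch of the classification this enables; the output pin names follow the client_error / server_error outputs mentioned in the comment above, while the helper itself is an assumption:

# Illustrative only: map a status code to the output pin it should feed.
def classify_status(status: int) -> str:
    if 400 <= status < 500:
        return "client_error"
    if status >= 500:
        return "server_error"
    return "response"
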
@@ -1,5 +1,5 @@
import logging
from typing import Any, Literal
from typing import Any

from prisma.enums import ReviewStatus

@@ -45,11 +45,11 @@ class HumanInTheLoopBlock(Block):
    )

    class Output(BlockSchemaOutput):
        reviewed_data: Any = SchemaField(
            description="The data after human review (may be modified)"
        approved_data: Any = SchemaField(
            description="The data when approved (may be modified by reviewer)"
        )
        status: Literal["approved", "rejected"] = SchemaField(
            description="Status of the review: 'approved' or 'rejected'"
        rejected_data: Any = SchemaField(
            description="The data when rejected (may be modified by reviewer)"
        )
        review_message: str = SchemaField(
            description="Any message provided by the reviewer", default=""
@@ -69,8 +69,7 @@ class HumanInTheLoopBlock(Block):
                "editable": True,
            },
            test_output=[
                ("status", "approved"),
                ("reviewed_data", {"name": "John Doe", "age": 30}),
                ("approved_data", {"name": "John Doe", "age": 30}),
            ],
            test_mock={
                "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
@@ -116,8 +115,7 @@ class HumanInTheLoopBlock(Block):
            logger.info(
                f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
            )
            yield "status", "approved"
            yield "reviewed_data", input_data.data
            yield "approved_data", input_data.data
            yield "review_message", "Auto-approved (safe mode disabled)"
            return

@@ -158,12 +156,11 @@ class HumanInTheLoopBlock(Block):
        )

        if result.status == ReviewStatus.APPROVED:
            yield "status", "approved"
            yield "reviewed_data", result.data
            yield "approved_data", result.data
            if result.message:
                yield "review_message", result.message

        elif result.status == ReviewStatus.REJECTED:
            yield "status", "rejected"
            yield "rejected_data", result.data
            if result.message:
                yield "review_message", result.message

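The status output is replaced by two data pins, so approval branching now happens by which pin fires: connect approved_data to the happy path and rejected_data to a fallback. A toy consumer under that assumption (pin names come from the diff above; the dispatcher itself is hypothetical):

# Illustrative only: react to whichever HITL output pin fired.
def on_review(pin: str, data: object) -> None:
    if pin == "approved_data":
        print("continue pipeline with", data)
    elif pin == "rejected_data":
        print("handle rejection of", data)
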
@@ -2,6 +2,8 @@ import copy
from datetime import date, time
from typing import Any, Optional

# Import for Google Drive file input block
from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
from backend.data.block import (
    Block,
    BlockCategory,
@@ -646,6 +648,119 @@ class AgentTableInputBlock(AgentInputBlock):
        yield "result", input_data.value if input_data.value is not None else []


class AgentGoogleDriveFileInputBlock(AgentInputBlock):
    """
    This block allows users to select a file from Google Drive.

    It provides a Google Drive file picker UI that handles both authentication
    and file selection. The selected file information (ID, name, URL, etc.)
    is output for use by other blocks like Google Sheets Read.
    """

    class Input(AgentInputBlock.Input):
        value: Optional[GoogleDriveFile] = SchemaField(
            description="The selected Google Drive file.",
            default=None,
            advanced=False,
            title="Selected File",
        )
        allowed_views: list[AttachmentView] = SchemaField(
            description="Which views to show in the file picker (DOCS, SPREADSHEETS, PRESENTATIONS, etc.).",
            default_factory=lambda: ["DOCS", "SPREADSHEETS", "PRESENTATIONS"],
            advanced=False,
            title="Allowed Views",
        )
        allow_folder_selection: bool = SchemaField(
            description="Whether to allow selecting folders.",
            default=False,
            advanced=True,
            title="Allow Folder Selection",
        )

        def generate_schema(self):
            """Generate schema for the value field with Google Drive picker format."""
            schema = super().generate_schema()

            # Default scopes for drive.file access
            scopes = ["https://www.googleapis.com/auth/drive.file"]

            # Build picker configuration
            picker_config = {
                "multiselect": False,  # Single file selection only for now
                "allow_folder_selection": self.allow_folder_selection,
                "allowed_views": (
                    list(self.allowed_views) if self.allowed_views else ["DOCS"]
                ),
                "scopes": scopes,
                # Auto-credentials config tells frontend to include _credentials_id in output
                "auto_credentials": {
                    "provider": "google",
                    "type": "oauth2",
                    "scopes": scopes,
                    "kwarg_name": "credentials",
                },
            }

            # Set format and config for frontend to render Google Drive picker
            schema["format"] = "google-drive-picker"
            schema["google_drive_picker_config"] = picker_config
            # Also keep auto_credentials at top level for backend detection
            schema["auto_credentials"] = {
                "provider": "google",
                "type": "oauth2",
                "scopes": scopes,
                "kwarg_name": "credentials",
            }

            if self.value is not None:
                schema["default"] = self.value.model_dump()

            return schema

    class Output(AgentInputBlock.Output):
        result: GoogleDriveFile = SchemaField(
            description="The selected Google Drive file with ID, name, URL, and other metadata."
        )

    def __init__(self):
        test_file = GoogleDriveFile.model_validate(
            {
                "id": "test-file-id",
                "name": "Test Spreadsheet",
                "mimeType": "application/vnd.google-apps.spreadsheet",
                "url": "https://docs.google.com/spreadsheets/d/test-file-id",
            }
        )
        super().__init__(
            id="d3b32f15-6fd7-40e3-be52-e083f51b19a2",
            description="Block for selecting a file from Google Drive.",
            disabled=not config.enable_agent_input_subtype_blocks,
            input_schema=AgentGoogleDriveFileInputBlock.Input,
            output_schema=AgentGoogleDriveFileInputBlock.Output,
            test_input=[
                {
                    "name": "spreadsheet_input",
                    "description": "Select a spreadsheet from Google Drive",
                    "allowed_views": ["SPREADSHEETS"],
                    "value": {
                        "id": "test-file-id",
                        "name": "Test Spreadsheet",
                        "mimeType": "application/vnd.google-apps.spreadsheet",
                        "url": "https://docs.google.com/spreadsheets/d/test-file-id",
                    },
                }
            ],
            test_output=[("result", test_file)],
        )

    async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
        """
        Yields the selected Google Drive file.
        """
        if input_data.value is not None:
            yield "result", input_data.value


IO_BLOCK_IDs = [
    AgentInputBlock().id,
    AgentOutputBlock().id,
@@ -658,4 +773,5 @@ IO_BLOCK_IDs = [
    AgentDropdownInputBlock().id,
    AgentToggleInputBlock().id,
    AgentTableInputBlock().id,
    AgentGoogleDriveFileInputBlock().id,
]

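Because GoogleDriveFile sets populate_by_name=True and aliases credentials_id to "_credentials_id", the model accepts the frontend's underscored key while exposing a clean attribute. A minimal sketch (all values made up):

# Illustrative only: the "_credentials_id" alias round-trips through validation.
file = GoogleDriveFile.model_validate(
    {
        "id": "abc123",
        "name": "Report",
        "mimeType": "application/pdf",
        "url": "https://drive.google.com/file/d/abc123",
        "_credentials_id": "cred-42",
    }
)
assert file.credentials_id == "cred-42"
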
@@ -1,3 +1,4 @@
import logging
from datetime import datetime, timezone
from typing import Iterator, Literal

@@ -64,6 +65,7 @@ class RedditComment(BaseModel):


settings = Settings()
logger = logging.getLogger(__name__)


def get_praw(creds: RedditCredentials) -> praw.Reddit:
@@ -77,7 +79,7 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit:
    me = client.user.me()
    if not me:
        raise ValueError("Invalid Reddit credentials.")
    print(f"Logged in as Reddit user: {me.name}")
    logger.info(f"Logged in as Reddit user: {me.name}")
    return client


@@ -1,8 +1,11 @@
import logging
import re
from collections import Counter
from concurrent.futures import Future
from typing import TYPE_CHECKING, Any

from pydantic import BaseModel

import backend.blocks.llm as llm
from backend.blocks.agent import AgentExecutorBlock
from backend.data.block import (
@@ -20,16 +23,41 @@ from backend.data.dynamic_fields import (
    is_dynamic_field,
    is_tool_pin,
)
from backend.data.execution import ExecutionContext
from backend.data.model import NodeExecutionStats, SchemaField
from backend.util import json
from backend.util.clients import get_database_manager_async_client
from backend.util.prompt import MAIN_OBJECTIVE_PREFIX

if TYPE_CHECKING:
    from backend.data.graph import Link, Node
    from backend.executor.manager import ExecutionProcessor

logger = logging.getLogger(__name__)


class ToolInfo(BaseModel):
    """Processed tool call information."""

    tool_call: Any  # The original tool call object from LLM response
    tool_name: str  # The function name
    tool_def: dict[str, Any]  # The tool definition from tool_functions
    input_data: dict[str, Any]  # Processed input data ready for tool execution
    field_mapping: dict[str, str]  # Field name mapping for the tool


class ExecutionParams(BaseModel):
    """Tool execution parameters."""

    user_id: str
    graph_id: str
    node_id: str
    graph_version: int
    graph_exec_id: str
    node_exec_id: str
    execution_context: "ExecutionContext"


def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
    """
    Return a list of tool_call_ids if the entry is a tool request.
@@ -105,6 +133,50 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]:
    return {"role": "tool", "tool_call_id": call_id, "content": content}


def _combine_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """
    Combine multiple Anthropic tool responses into a single user message.
    For non-Anthropic formats, returns the original list unchanged.
    """
    if len(tool_outputs) <= 1:
        return tool_outputs

    # Anthropic responses have role="user", type="message", and content is a list with tool_result items
    anthropic_responses = [
        output
        for output in tool_outputs
        if (
            output.get("role") == "user"
            and output.get("type") == "message"
            and isinstance(output.get("content"), list)
            and any(
                item.get("type") == "tool_result"
                for item in output.get("content", [])
                if isinstance(item, dict)
            )
        )
    ]

    if len(anthropic_responses) > 1:
        combined_content = [
            item for response in anthropic_responses for item in response["content"]
        ]

        combined_response = {
            "role": "user",
            "type": "message",
            "content": combined_content,
        }

        non_anthropic_responses = [
            output for output in tool_outputs if output not in anthropic_responses
        ]

        return [combined_response] + non_anthropic_responses

    return tool_outputs


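# --- Illustrative sketch (not part of the diff) -------------------------------
# Two Anthropic-style tool_result messages collapse into one user message, which
# is what Anthropic's "all tool results in a single turn" pairing requires. The
# payload values are made up; the function is the one defined above.
outputs = [
    {
        "role": "user",
        "type": "message",
        "content": [{"type": "tool_result", "tool_use_id": "a", "content": "1"}],
    },
    {
        "role": "user",
        "type": "message",
        "content": [{"type": "tool_result", "tool_use_id": "b", "content": "2"}],
    },
]
combined = _combine_tool_responses(outputs)
assert len(combined) == 1 and len(combined[0]["content"]) == 2

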
|
||||
def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
|
||||
"""
|
||||
Safely convert raw_response to dictionary format for conversation history.
|
||||
@@ -204,6 +276,17 @@ class SmartDecisionMakerBlock(Block):
|
||||
default="localhost:11434",
|
||||
description="Ollama host for local models",
|
||||
)
|
||||
agent_mode_max_iterations: int = SchemaField(
|
||||
title="Agent Mode Max Iterations",
|
||||
description="Maximum iterations for agent mode. 0 = traditional mode (single LLM call, yield tool calls for external execution), -1 = infinite agent mode (loop until finished), 1+ = agent mode with max iterations limit.",
|
||||
advanced=True,
|
||||
default=0,
|
||||
)
|
||||
conversation_compaction: bool = SchemaField(
|
||||
default=True,
|
||||
title="Context window auto-compaction",
|
||||
description="Automatically compact the context window once it hits the limit",
|
||||
)

    @classmethod
    def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
@@ -506,6 +589,7 @@ class SmartDecisionMakerBlock(Block):
        Returns the response if successful, raises ValueError if validation fails.
        """
        resp = await llm.llm_call(
            compress_prompt_to_fit=input_data.conversation_compaction,
            credentials=credentials,
            llm_model=input_data.model,
            prompt=current_prompt,
@@ -593,6 +677,291 @@ class SmartDecisionMakerBlock(Block):

        return resp

    def _process_tool_calls(
        self, response, tool_functions: list[dict[str, Any]]
    ) -> list[ToolInfo]:
        """Process tool calls and extract tool definitions, arguments, and input data.

        Returns a list of tool info dicts with:
        - tool_call: The original tool call object
        - tool_name: The function name
        - tool_def: The tool definition from tool_functions
        - input_data: Processed input data dict (includes None values)
        - field_mapping: Field name mapping for the tool
        """
        if not response.tool_calls:
            return []

        processed_tools = []
        for tool_call in response.tool_calls:
            tool_name = tool_call.function.name
            tool_args = json.loads(tool_call.function.arguments)

            tool_def = next(
                (
                    tool
                    for tool in tool_functions
                    if tool["function"]["name"] == tool_name
                ),
                None,
            )
            if not tool_def:
                if len(tool_functions) == 1:
                    tool_def = tool_functions[0]
                else:
                    continue

            # Build input data for the tool
            input_data = {}
            field_mapping = tool_def["function"].get("_field_mapping", {})
            if "function" in tool_def and "parameters" in tool_def["function"]:
                expected_args = tool_def["function"]["parameters"].get("properties", {})
                for clean_arg_name in expected_args:
                    original_field_name = field_mapping.get(
                        clean_arg_name, clean_arg_name
                    )
                    arg_value = tool_args.get(clean_arg_name)
                    # Include all expected parameters, even if None (for backward compatibility with tests)
                    input_data[original_field_name] = arg_value

            processed_tools.append(
                ToolInfo(
                    tool_call=tool_call,
                    tool_name=tool_name,
                    tool_def=tool_def,
                    input_data=input_data,
                    field_mapping=field_mapping,
                )
            )

        return processed_tools
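# Illustrative sketch (not part of the diff, values hypothetical): how the
# `_field_mapping` lookup above translates cleaned LLM argument names back to
# the original dynamic field names before tool execution.
field_mapping = {"name": "values_#_name"}   # clean name -> original field name
tool_args = {"name": "Alice"}               # arguments as emitted by the LLM
input_data = {field_mapping.get(k, k): v for k, v in tool_args.items()}
assert input_data == {"values_#_name": "Alice"}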

    def _update_conversation(
        self, prompt: list[dict], response, tool_outputs: list | None = None
    ):
        """Update conversation history with response and tool outputs."""
        # Don't add separate reasoning message with tool calls (breaks Anthropic's tool_use->tool_result pairing)
        assistant_message = _convert_raw_response_to_dict(response.raw_response)
        has_tool_calls = isinstance(assistant_message.get("content"), list) and any(
            item.get("type") == "tool_use"
            for item in assistant_message.get("content", [])
        )

        if response.reasoning and not has_tool_calls:
            prompt.append(
                {"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
            )

        prompt.append(assistant_message)

        if tool_outputs:
            prompt.extend(tool_outputs)
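# Illustrative sketch (not part of the diff): the message ordering this method
# preserves. Anthropic models expect each assistant tool_use block to be
# followed directly by a matching tool_result, so a standalone reasoning
# message may only be appended when the response carries no tool calls:
#
#   {"role": "assistant", "content": [{"type": "tool_use", "id": "call_1", ...}]}
#   {"role": "user", "content": [{"type": "tool_result", "tool_use_id": "call_1", ...}]}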

    async def _execute_single_tool_with_manager(
        self,
        tool_info: ToolInfo,
        execution_params: ExecutionParams,
        execution_processor: "ExecutionProcessor",
    ) -> dict:
        """Execute a single tool using the execution manager for proper integration."""
        # Lazy imports to avoid circular dependencies
        from backend.data.execution import NodeExecutionEntry

        tool_call = tool_info.tool_call
        tool_def = tool_info.tool_def
        raw_input_data = tool_info.input_data

        # Get sink node and field mapping
        sink_node_id = tool_def["function"]["_sink_node_id"]

        # Use proper database operations for tool execution
        db_client = get_database_manager_async_client()

        # Get target node
        target_node = await db_client.get_node(sink_node_id)
        if not target_node:
            raise ValueError(f"Target node {sink_node_id} not found")

        # Create proper node execution using upsert_execution_input
        node_exec_result = None
        final_input_data = None

        # Add all inputs to the execution
        if not raw_input_data:
            raise ValueError(f"Tool call has no input data: {tool_call}")

        for input_name, input_value in raw_input_data.items():
            node_exec_result, final_input_data = await db_client.upsert_execution_input(
                node_id=sink_node_id,
                graph_exec_id=execution_params.graph_exec_id,
                input_name=input_name,
                input_data=input_value,
            )

        assert node_exec_result is not None, "node_exec_result should not be None"

        # Create NodeExecutionEntry for execution manager
        node_exec_entry = NodeExecutionEntry(
            user_id=execution_params.user_id,
            graph_exec_id=execution_params.graph_exec_id,
            graph_id=execution_params.graph_id,
            graph_version=execution_params.graph_version,
            node_exec_id=node_exec_result.node_exec_id,
            node_id=sink_node_id,
            block_id=target_node.block_id,
            inputs=final_input_data or {},
            execution_context=execution_params.execution_context,
        )

        # Use the execution manager to execute the tool node
        try:
            # Get NodeExecutionProgress from the execution manager's running nodes
            node_exec_progress = execution_processor.running_node_execution[
                sink_node_id
            ]

            # Use the execution manager's own graph stats
            graph_stats_pair = (
                execution_processor.execution_stats,
                execution_processor.execution_stats_lock,
            )

            # Create a completed future for the task tracking system
            node_exec_future = Future()
            node_exec_progress.add_task(
                node_exec_id=node_exec_result.node_exec_id,
                task=node_exec_future,
            )

            # Execute the node directly since we're in the SmartDecisionMaker context
            node_exec_future.set_result(
                await execution_processor.on_node_execution(
                    node_exec=node_exec_entry,
                    node_exec_progress=node_exec_progress,
                    nodes_input_masks=None,
                    graph_stats_pair=graph_stats_pair,
                )
            )

            # Get outputs from database after execution completes using database manager client
            node_outputs = await db_client.get_execution_outputs_by_node_exec_id(
                node_exec_result.node_exec_id
            )

            # Create tool response
            tool_response_content = (
                json.dumps(node_outputs)
                if node_outputs
                else "Tool executed successfully"
            )
            return _create_tool_response(tool_call.id, tool_response_content)

        except Exception as e:
            logger.error(f"Tool execution with manager failed: {e}")
            # Return error response
            return _create_tool_response(
                tool_call.id, f"Tool execution failed: {str(e)}"
            )

    async def _execute_tools_agent_mode(
        self,
        input_data,
        credentials,
        tool_functions: list[dict[str, Any]],
        prompt: list[dict],
        graph_exec_id: str,
        node_id: str,
        node_exec_id: str,
        user_id: str,
        graph_id: str,
        graph_version: int,
        execution_context: ExecutionContext,
        execution_processor: "ExecutionProcessor",
    ):
        """Execute tools in agent mode with a loop until finished."""
        max_iterations = input_data.agent_mode_max_iterations
        iteration = 0

        # Execution parameters for tool execution
        execution_params = ExecutionParams(
            user_id=user_id,
            graph_id=graph_id,
            node_id=node_id,
            graph_version=graph_version,
            graph_exec_id=graph_exec_id,
            node_exec_id=node_exec_id,
            execution_context=execution_context,
        )

        current_prompt = list(prompt)

        while max_iterations < 0 or iteration < max_iterations:
            iteration += 1
            logger.debug(f"Agent mode iteration {iteration}")

            # Prepare prompt for this iteration
            iteration_prompt = list(current_prompt)

            # On the last iteration, add a special system message to encourage completion
            if max_iterations > 0 and iteration == max_iterations:
                last_iteration_message = {
                    "role": "system",
                    "content": f"{MAIN_OBJECTIVE_PREFIX}This is your last iteration ({iteration}/{max_iterations}). "
                    "Try to complete the task with the information you have. If you cannot fully complete it, "
                    "provide a summary of what you've accomplished and what remains to be done. "
                    "Prefer finishing with a clear response rather than making additional tool calls.",
                }
                iteration_prompt.append(last_iteration_message)

            # Get LLM response
            try:
                response = await self._attempt_llm_call_with_validation(
                    credentials, input_data, iteration_prompt, tool_functions
                )
            except Exception as e:
                yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}"
                return

            # Process tool calls
            processed_tools = self._process_tool_calls(response, tool_functions)

            # If no tool calls, we're done
            if not processed_tools:
                yield "finished", response.response
                self._update_conversation(current_prompt, response)
                yield "conversations", current_prompt
                return

            # Execute tools and collect responses
            tool_outputs = []
            for tool_info in processed_tools:
                try:
                    tool_response = await self._execute_single_tool_with_manager(
                        tool_info, execution_params, execution_processor
                    )
                    tool_outputs.append(tool_response)
                except Exception as e:
                    logger.error(f"Tool execution failed: {e}")
                    # Create error response for the tool
                    error_response = _create_tool_response(
                        tool_info.tool_call.id, f"Error: {str(e)}"
                    )
                    tool_outputs.append(error_response)

            tool_outputs = _combine_tool_responses(tool_outputs)

            self._update_conversation(current_prompt, response, tool_outputs)

            # Yield intermediate conversation state
            yield "conversations", current_prompt

        # If we reach max iterations, yield the current state
        if max_iterations < 0:
            yield "finished", f"Agent mode completed after {iteration} iterations"
        else:
            yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)"
        yield "conversations", current_prompt

    async def run(
        self,
        input_data: Input,
@@ -603,8 +972,12 @@ class SmartDecisionMakerBlock(Block):
        graph_exec_id: str,
        node_exec_id: str,
        user_id: str,
        graph_version: int,
        execution_context: ExecutionContext,
        execution_processor: "ExecutionProcessor",
        **kwargs,
    ) -> BlockOutput:

        tool_functions = await self._create_tool_node_signatures(node_id)
        yield "tool_functions", json.dumps(tool_functions)

@@ -648,24 +1021,52 @@ class SmartDecisionMakerBlock(Block):
        input_data.prompt = llm.fmt.format_string(input_data.prompt, values)
        input_data.sys_prompt = llm.fmt.format_string(input_data.sys_prompt, values)

        prefix = "[Main Objective Prompt]: "

        if input_data.sys_prompt and not any(
            p["role"] == "system" and p["content"].startswith(prefix) for p in prompt
            p["role"] == "system" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
            for p in prompt
        ):
            prompt.append({"role": "system", "content": prefix + input_data.sys_prompt})
            prompt.append(
                {
                    "role": "system",
                    "content": MAIN_OBJECTIVE_PREFIX + input_data.sys_prompt,
                }
            )

        if input_data.prompt and not any(
            p["role"] == "user" and p["content"].startswith(prefix) for p in prompt
            p["role"] == "user" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
            for p in prompt
        ):
            prompt.append({"role": "user", "content": prefix + input_data.prompt})
            prompt.append(
                {"role": "user", "content": MAIN_OBJECTIVE_PREFIX + input_data.prompt}
            )

        # Execute tools based on the selected mode
        if input_data.agent_mode_max_iterations != 0:
            # In agent mode, execute tools directly in a loop until finished
            async for result in self._execute_tools_agent_mode(
                input_data=input_data,
                credentials=credentials,
                tool_functions=tool_functions,
                prompt=prompt,
                graph_exec_id=graph_exec_id,
                node_id=node_id,
                node_exec_id=node_exec_id,
                user_id=user_id,
                graph_id=graph_id,
                graph_version=graph_version,
                execution_context=execution_context,
                execution_processor=execution_processor,
            ):
                yield result
            return

        # One-off mode: single LLM call and yield tool calls for external execution
        current_prompt = list(prompt)
        max_attempts = max(1, int(input_data.retry))
        response = None

        last_error = None
        for attempt in range(max_attempts):
        for _ in range(max_attempts):
            try:
                response = await self._attempt_llm_call_with_validation(
                    credentials, input_data, current_prompt, tool_functions

@@ -1,8 +1,9 @@
from typing import Type
from typing import Any, Type

import pytest

from backend.data.block import Block, get_blocks
from backend.data.block import Block, BlockSchemaInput, get_blocks
from backend.data.model import SchemaField
from backend.util.test import execute_block_test

SKIP_BLOCK_TESTS = {
@@ -132,3 +133,148 @@ async def test_block_ids_valid(block: Type[Block]):
        ), f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
    except ValueError:
        pytest.fail(f"Block {block.name} has invalid UUID format: {block_instance.id}")


class TestAutoCredentialsFieldsValidation:
    """Tests for auto_credentials field validation in BlockSchema."""

    def test_duplicate_auto_credentials_kwarg_name_raises_error(self):
        """Test that duplicate kwarg_name in auto_credentials raises ValueError."""

        class DuplicateKwargSchema(BlockSchemaInput):
            """Schema with duplicate auto_credentials kwarg_name."""

            # Both fields explicitly use the same kwarg_name "credentials"
            file1: dict[str, Any] | None = SchemaField(
                description="First file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "credentials",
                    }
                },
            )
            file2: dict[str, Any] | None = SchemaField(
                description="Second file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "credentials",  # Duplicate kwarg_name!
                    }
                },
            )

        with pytest.raises(ValueError) as exc_info:
            DuplicateKwargSchema.get_auto_credentials_fields()

        error_message = str(exc_info.value)
        assert "Duplicate auto_credentials kwarg_name 'credentials'" in error_message
        assert "file1" in error_message
        assert "file2" in error_message

    def test_unique_auto_credentials_kwarg_names_succeed(self):
        """Test that unique kwarg_name values work correctly."""

        class UniqueKwargSchema(BlockSchemaInput):
            """Schema with unique auto_credentials kwarg_name values."""

            file1: dict[str, Any] | None = SchemaField(
                description="First file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "file1_credentials",
                    }
                },
            )
            file2: dict[str, Any] | None = SchemaField(
                description="Second file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        "kwarg_name": "file2_credentials",  # Different kwarg_name
                    }
                },
            )

        # Should not raise
        result = UniqueKwargSchema.get_auto_credentials_fields()

        assert "file1_credentials" in result
        assert "file2_credentials" in result
        assert result["file1_credentials"]["field_name"] == "file1"
        assert result["file2_credentials"]["field_name"] == "file2"

    def test_default_kwarg_name_is_credentials(self):
        """Test that missing kwarg_name defaults to 'credentials'."""

        class DefaultKwargSchema(BlockSchemaInput):
            """Schema with auto_credentials missing kwarg_name."""

            file: dict[str, Any] | None = SchemaField(
                description="File input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        # No kwarg_name specified - should default to "credentials"
                    }
                },
            )

        result = DefaultKwargSchema.get_auto_credentials_fields()

        assert "credentials" in result
        assert result["credentials"]["field_name"] == "file"

    def test_duplicate_default_kwarg_name_raises_error(self):
        """Test that two fields with default kwarg_name raises ValueError."""

        class DefaultDuplicateSchema(BlockSchemaInput):
            """Schema where both fields omit kwarg_name, defaulting to 'credentials'."""

            file1: dict[str, Any] | None = SchemaField(
                description="First file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        # No kwarg_name - defaults to "credentials"
                    }
                },
            )
            file2: dict[str, Any] | None = SchemaField(
                description="Second file input",
                default=None,
                json_schema_extra={
                    "auto_credentials": {
                        "provider": "google",
                        "type": "oauth2",
                        "scopes": ["https://www.googleapis.com/auth/drive.file"],
                        # No kwarg_name - also defaults to "credentials"
                    }
                },
            )

        with pytest.raises(ValueError) as exc_info:
            DefaultDuplicateSchema.get_auto_credentials_fields()

        assert "Duplicate auto_credentials kwarg_name 'credentials'" in str(
            exc_info.value
        )
@@ -1,7 +1,11 @@
import logging
import threading
from collections import defaultdict
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.data.execution import ExecutionContext
from backend.data.model import ProviderName, User
from backend.server.model import CreateGraph
from backend.server.rest_api import AgentServer
@@ -17,10 +21,10 @@ async def create_graph(s: SpinTestServer, g, u: User):


async def create_credentials(s: SpinTestServer, u: User):
    import backend.blocks.llm as llm
    import backend.blocks.llm as llm_module

    provider = ProviderName.OPENAI
    credentials = llm.TEST_CREDENTIALS
    credentials = llm_module.TEST_CREDENTIALS
    return await s.agent_server.test_create_credentials(u.id, provider, credentials)


@@ -196,8 +200,6 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
@pytest.mark.asyncio
async def test_smart_decision_maker_tracks_llm_stats():
    """Test that SmartDecisionMakerBlock correctly tracks LLM usage stats."""
    from unittest.mock import MagicMock, patch

    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

@@ -216,7 +218,6 @@ async def test_smart_decision_maker_tracks_llm_stats():
    }

    # Mock the _create_tool_node_signatures method to avoid database calls
    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
@@ -234,10 +235,19 @@ async def test_smart_decision_maker_tracks_llm_stats():
            prompt="Should I continue with this task?",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Execute the block
        outputs = {}
        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -246,6 +256,9 @@
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

@@ -263,8 +276,6 @@
@pytest.mark.asyncio
async def test_smart_decision_maker_parameter_validation():
    """Test that SmartDecisionMakerBlock correctly validates tool call parameters."""
    from unittest.mock import MagicMock, patch

    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

@@ -311,8 +322,6 @@
    mock_response_with_typo.reasoning = None
    mock_response_with_typo.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
@@ -329,8 +338,17 @@
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            retry=2,  # Set retry to 2 for testing
            agent_mode_max_iterations=0,
        )

        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        # Should raise ValueError after retries due to typo'd parameter name
        with pytest.raises(ValueError) as exc_info:
            outputs = {}
@@ -342,6 +360,9 @@
                graph_exec_id="test-exec-id",
                node_exec_id="test-node-exec-id",
                user_id="test-user-id",
                graph_version=1,
                execution_context=mock_execution_context,
                execution_processor=mock_execution_processor,
            ):
                outputs[output_name] = output_data

@@ -368,8 +389,6 @@
    mock_response_missing_required.reasoning = None
    mock_response_missing_required.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
@@ -385,8 +404,17 @@
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        # Should raise ValueError due to missing required parameter
        with pytest.raises(ValueError) as exc_info:
            outputs = {}
@@ -398,6 +426,9 @@
                graph_exec_id="test-exec-id",
                node_exec_id="test-node-exec-id",
                user_id="test-user-id",
                graph_version=1,
                execution_context=mock_execution_context,
                execution_processor=mock_execution_processor,
            ):
                outputs[output_name] = output_data

@@ -418,8 +449,6 @@
    mock_response_valid.reasoning = None
    mock_response_valid.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
@@ -435,10 +464,19 @@
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Should succeed - optional parameter missing is OK
        outputs = {}
        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -447,6 +485,9 @@
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

@@ -472,8 +513,6 @@
    mock_response_all_params.reasoning = None
    mock_response_all_params.raw_response = {"role": "assistant", "content": None}

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
@@ -489,10 +528,19 @@
            prompt="Search for keywords",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        # Should succeed with all parameters
        outputs = {}
        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -501,6 +549,9 @@
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

@@ -513,8 +564,6 @@
@pytest.mark.asyncio
async def test_smart_decision_maker_raw_response_conversion():
    """Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism."""
    from unittest.mock import MagicMock, patch

    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

@@ -584,7 +633,6 @@
    )

    # Mock llm_call to return different responses on different calls
    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call", new_callable=AsyncMock
@@ -603,10 +651,19 @@
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            retry=2,
            agent_mode_max_iterations=0,
        )

        # Should succeed after retry, demonstrating our helper function works
        outputs = {}
        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -615,6 +672,9 @@
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

@@ -650,8 +710,6 @@
        "I'll help you with that."  # Ollama returns string
    )

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
@@ -666,9 +724,18 @@
            prompt="Simple prompt",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        outputs = {}
        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
@@ -677,6 +744,9 @@
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

@@ -696,8 +766,6 @@
        "content": "Test response",
    }  # Dict format

    from unittest.mock import AsyncMock

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
@@ -712,6 +780,160 @@
            prompt="Another test",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,
        )

        outputs = {}
        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
            graph_id="test-graph-id",
            node_id="test-node-id",
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        assert "finished" in outputs
        assert outputs["finished"] == "Test response"


@pytest.mark.asyncio
async def test_smart_decision_maker_agent_mode():
    """Test that agent mode executes tools directly and loops until finished."""
    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

    block = SmartDecisionMakerBlock()

    # Mock tool call that requires multiple iterations
    mock_tool_call_1 = MagicMock()
    mock_tool_call_1.id = "call_1"
    mock_tool_call_1.function.name = "search_keywords"
    mock_tool_call_1.function.arguments = (
        '{"query": "test", "max_keyword_difficulty": 50}'
    )

    mock_response_1 = MagicMock()
    mock_response_1.response = None
    mock_response_1.tool_calls = [mock_tool_call_1]
    mock_response_1.prompt_tokens = 50
    mock_response_1.completion_tokens = 25
    mock_response_1.reasoning = "Using search tool"
    mock_response_1.raw_response = {
        "role": "assistant",
        "content": None,
        "tool_calls": [{"id": "call_1", "type": "function"}],
    }

    # Final response with no tool calls (finished)
    mock_response_2 = MagicMock()
    mock_response_2.response = "Task completed successfully"
    mock_response_2.tool_calls = []
    mock_response_2.prompt_tokens = 30
    mock_response_2.completion_tokens = 15
    mock_response_2.reasoning = None
    mock_response_2.raw_response = {
        "role": "assistant",
        "content": "Task completed successfully",
    }

    # Mock the LLM call to return different responses on each iteration
    llm_call_mock = AsyncMock()
    llm_call_mock.side_effect = [mock_response_1, mock_response_2]

    # Mock tool node signatures
    mock_tool_signatures = [
        {
            "type": "function",
            "function": {
                "name": "search_keywords",
                "_sink_node_id": "test-sink-node-id",
                "_field_mapping": {},
                "parameters": {
                    "properties": {
                        "query": {"type": "string"},
                        "max_keyword_difficulty": {"type": "integer"},
                    },
                    "required": ["query", "max_keyword_difficulty"],
                },
            },
        }
    ]

    # Mock database and execution components
    mock_db_client = AsyncMock()
    mock_node = MagicMock()
    mock_node.block_id = "test-block-id"
    mock_db_client.get_node.return_value = mock_node

    # Mock upsert_execution_input to return proper NodeExecutionResult and input data
    mock_node_exec_result = MagicMock()
    mock_node_exec_result.node_exec_id = "test-tool-exec-id"
    mock_input_data = {"query": "test", "max_keyword_difficulty": 50}
    mock_db_client.upsert_execution_input.return_value = (
        mock_node_exec_result,
        mock_input_data,
    )

    # No longer need mock_execute_node since we use execution_processor.on_node_execution

    with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object(
        block, "_create_tool_node_signatures", return_value=mock_tool_signatures
    ), patch(
        "backend.blocks.smart_decision_maker.get_database_manager_async_client",
        return_value=mock_db_client,
    ), patch(
        "backend.executor.manager.async_update_node_execution_status",
        new_callable=AsyncMock,
    ), patch(
        "backend.integrations.creds_manager.IntegrationCredentialsManager"
    ):

        # Create a mock execution context

        mock_execution_context = ExecutionContext(
            safe_mode=False,
        )

        # Create a mock execution processor for agent mode tests

        mock_execution_processor = AsyncMock()
        # Configure the execution processor mock with required attributes
        mock_execution_processor.running_node_execution = defaultdict(MagicMock)
        mock_execution_processor.execution_stats = MagicMock()
        mock_execution_processor.execution_stats_lock = threading.Lock()

        # Mock the on_node_execution method to return successful stats
        mock_node_stats = MagicMock()
        mock_node_stats.error = None  # No error
        mock_execution_processor.on_node_execution = AsyncMock(
            return_value=mock_node_stats
        )

        # Mock the get_execution_outputs_by_node_exec_id method
        mock_db_client.get_execution_outputs_by_node_exec_id.return_value = {
            "result": {"status": "success", "data": "search completed"}
        }

        # Test agent mode with max_iterations = 3
        input_data = SmartDecisionMakerBlock.Input(
            prompt="Complete this task using tools",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=3,  # Enable agent mode with 3 max iterations
        )

        outputs = {}
@@ -723,8 +945,115 @@ async def test_smart_decision_maker_raw_response_conversion():
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        # Verify agent mode behavior
        assert "tool_functions" in outputs  # tool_functions is yielded in both modes
        assert "finished" in outputs
        assert outputs["finished"] == "Test response"
        assert outputs["finished"] == "Task completed successfully"
        assert "conversations" in outputs

        # Verify the conversation includes tool responses
        conversations = outputs["conversations"]
        assert len(conversations) > 2  # Should have multiple conversation entries

        # Verify LLM was called twice (once for tool call, once for finish)
        assert llm_call_mock.call_count == 2

        # Verify tool was executed via execution processor
        assert mock_execution_processor.on_node_execution.call_count == 1


@pytest.mark.asyncio
async def test_smart_decision_maker_traditional_mode_default():
    """Test that default behavior (agent_mode_max_iterations=0) works as traditional mode."""
    import backend.blocks.llm as llm_module
    from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock

    block = SmartDecisionMakerBlock()

    # Mock tool call
    mock_tool_call = MagicMock()
    mock_tool_call.function.name = "search_keywords"
    mock_tool_call.function.arguments = (
        '{"query": "test", "max_keyword_difficulty": 50}'
    )

    mock_response = MagicMock()
    mock_response.response = None
    mock_response.tool_calls = [mock_tool_call]
    mock_response.prompt_tokens = 50
    mock_response.completion_tokens = 25
    mock_response.reasoning = None
    mock_response.raw_response = {"role": "assistant", "content": None}

    mock_tool_signatures = [
        {
            "type": "function",
            "function": {
                "name": "search_keywords",
                "_sink_node_id": "test-sink-node-id",
                "_field_mapping": {},
                "parameters": {
                    "properties": {
                        "query": {"type": "string"},
                        "max_keyword_difficulty": {"type": "integer"},
                    },
                    "required": ["query", "max_keyword_difficulty"],
                },
            },
        }
    ]

    with patch(
        "backend.blocks.llm.llm_call",
        new_callable=AsyncMock,
        return_value=mock_response,
    ), patch.object(
        block, "_create_tool_node_signatures", return_value=mock_tool_signatures
    ):

        # Test default behavior (traditional mode)
        input_data = SmartDecisionMakerBlock.Input(
            prompt="Test prompt",
            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
            agent_mode_max_iterations=0,  # Traditional mode
        )

        # Create execution context

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a mock execution processor for tests

        mock_execution_processor = MagicMock()

        outputs = {}
        async for output_name, output_data in block.run(
            input_data,
            credentials=llm_module.TEST_CREDENTIALS,
            graph_id="test-graph-id",
            node_id="test-node-id",
            graph_exec_id="test-exec-id",
            node_exec_id="test-node-exec-id",
            user_id="test-user-id",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_data

        # Verify traditional mode behavior
        assert (
            "tool_functions" in outputs
        )  # Should yield tool_functions in traditional mode
        assert (
            "tools_^_test-sink-node-id_~_query" in outputs
        )  # Should yield individual tool parameters
        assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs
        assert "conversations" in outputs
@@ -1,7 +1,7 @@
"""Comprehensive tests for SmartDecisionMakerBlock dynamic field handling."""

import json
from unittest.mock import AsyncMock, Mock, patch
from unittest.mock import AsyncMock, MagicMock, Mock, patch

import pytest

@@ -308,10 +308,47 @@ async def test_output_yielding_with_dynamic_fields():
    ) as mock_llm:
        mock_llm.return_value = mock_response

        # Mock the function signature creation
        with patch.object(
        # Mock the database manager to avoid HTTP calls during tool execution
        with patch(
            "backend.blocks.smart_decision_maker.get_database_manager_async_client"
        ) as mock_db_manager, patch.object(
            block, "_create_tool_node_signatures", new_callable=AsyncMock
        ) as mock_sig:
            # Set up the mock database manager
            mock_db_client = AsyncMock()
            mock_db_manager.return_value = mock_db_client

            # Mock the node retrieval
            mock_target_node = Mock()
            mock_target_node.id = "test-sink-node-id"
            mock_target_node.block_id = "CreateDictionaryBlock"
            mock_target_node.block = Mock()
            mock_target_node.block.name = "Create Dictionary"
            mock_db_client.get_node.return_value = mock_target_node

            # Mock the execution result creation
            mock_node_exec_result = Mock()
            mock_node_exec_result.node_exec_id = "mock-node-exec-id"
            mock_final_input_data = {
                "values_#_name": "Alice",
                "values_#_age": 30,
                "values_#_email": "alice@example.com",
            }
            mock_db_client.upsert_execution_input.return_value = (
                mock_node_exec_result,
                mock_final_input_data,
            )

            # Mock the output retrieval
            mock_outputs = {
                "values_#_name": "Alice",
                "values_#_age": 30,
                "values_#_email": "alice@example.com",
            }
            mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
                mock_outputs
            )

            mock_sig.return_value = [
                {
                    "type": "function",
@@ -337,10 +374,16 @@ async def test_output_yielding_with_dynamic_fields():
                prompt="Create a user dictionary",
                credentials=llm.TEST_CREDENTIALS_INPUT,
                model=llm.LlmModel.GPT4O,
                agent_mode_max_iterations=0,  # Use traditional mode to test output yielding
            )

            # Run the block
            outputs = {}
            from backend.data.execution import ExecutionContext

            mock_execution_context = ExecutionContext(safe_mode=False)
            mock_execution_processor = MagicMock()

            async for output_name, output_value in block.run(
                input_data,
                credentials=llm.TEST_CREDENTIALS,
@@ -349,6 +392,9 @@ async def test_output_yielding_with_dynamic_fields():
                graph_exec_id="test_exec",
                node_exec_id="test_node_exec",
                user_id="test_user",
                graph_version=1,
                execution_context=mock_execution_context,
                execution_processor=mock_execution_processor,
            ):
                outputs[output_name] = output_value

@@ -511,45 +557,108 @@ async def test_validation_errors_dont_pollute_conversation():
        }
    ]

    # Create input data
    from backend.blocks import llm
    # Mock the database manager to avoid HTTP calls during tool execution
    with patch(
        "backend.blocks.smart_decision_maker.get_database_manager_async_client"
    ) as mock_db_manager:
        # Set up the mock database manager for agent mode
        mock_db_client = AsyncMock()
        mock_db_manager.return_value = mock_db_client

    input_data = block.input_schema(
        prompt="Test prompt",
        credentials=llm.TEST_CREDENTIALS_INPUT,
        model=llm.LlmModel.GPT4O,
        retry=3,  # Allow retries
    )
        # Mock the node retrieval
        mock_target_node = Mock()
        mock_target_node.id = "test-sink-node-id"
        mock_target_node.block_id = "TestBlock"
        mock_target_node.block = Mock()
        mock_target_node.block.name = "Test Block"
        mock_db_client.get_node.return_value = mock_target_node

    # Run the block
    outputs = {}
    async for output_name, output_value in block.run(
        input_data,
        credentials=llm.TEST_CREDENTIALS,
        graph_id="test_graph",
        node_id="test_node",
        graph_exec_id="test_exec",
        node_exec_id="test_node_exec",
        user_id="test_user",
    ):
        outputs[output_name] = output_value
        # Mock the execution result creation
        mock_node_exec_result = Mock()
        mock_node_exec_result.node_exec_id = "mock-node-exec-id"
        mock_final_input_data = {"correct_param": "value"}
        mock_db_client.upsert_execution_input.return_value = (
            mock_node_exec_result,
            mock_final_input_data,
        )

    # Verify we had 2 LLM calls (initial + retry)
    assert call_count == 2
        # Mock the output retrieval
        mock_outputs = {"correct_param": "value"}
        mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
            mock_outputs
        )

    # Check the final conversation output
    final_conversation = outputs.get("conversations", [])
        # Create input data
        from backend.blocks import llm

    # The final conversation should NOT contain the validation error message
    error_messages = [
        msg
        for msg in final_conversation
        if msg.get("role") == "user"
        and "parameter errors" in msg.get("content", "")
    ]
    assert (
        len(error_messages) == 0
    ), "Validation error leaked into final conversation"
        input_data = block.input_schema(
            prompt="Test prompt",
            credentials=llm.TEST_CREDENTIALS_INPUT,
            model=llm.LlmModel.GPT4O,
            retry=3,  # Allow retries
            agent_mode_max_iterations=1,
        )

    # The final conversation should only have the successful response
    assert final_conversation[-1]["content"] == "valid"
        # Run the block
        outputs = {}
        from backend.data.execution import ExecutionContext

        mock_execution_context = ExecutionContext(safe_mode=False)

        # Create a proper mock execution processor for agent mode
        from collections import defaultdict

        mock_execution_processor = AsyncMock()
        mock_execution_processor.execution_stats = MagicMock()
        mock_execution_processor.execution_stats_lock = MagicMock()

        # Create a mock NodeExecutionProgress for the sink node
        mock_node_exec_progress = MagicMock()
        mock_node_exec_progress.add_task = MagicMock()
        mock_node_exec_progress.pop_output = MagicMock(
            return_value=None
        )  # No outputs to process

        # Set up running_node_execution as a defaultdict that returns our mock for any key
        mock_execution_processor.running_node_execution = defaultdict(
            lambda: mock_node_exec_progress
        )

        # Mock the on_node_execution method that gets called during tool execution
        mock_node_stats = MagicMock()
        mock_node_stats.error = None
        mock_execution_processor.on_node_execution.return_value = (
            mock_node_stats
        )

        async for output_name, output_value in block.run(
            input_data,
            credentials=llm.TEST_CREDENTIALS,
            graph_id="test_graph",
            node_id="test_node",
            graph_exec_id="test_exec",
            node_exec_id="test_node_exec",
            user_id="test_user",
            graph_version=1,
            execution_context=mock_execution_context,
            execution_processor=mock_execution_processor,
        ):
            outputs[output_name] = output_value

        # Verify we had at least 1 LLM call
        assert call_count >= 1

        # Check the final conversation output
        final_conversation = outputs.get("conversations", [])

        # The final conversation should NOT contain validation error messages
        # Even if retries don't happen in agent mode, we should not leak errors
        error_messages = [
            msg
            for msg in final_conversation
            if msg.get("role") == "user"
            and "parameter errors" in msg.get("content", "")
        ]
        assert (
            len(error_messages) == 0
        ), "Validation error leaked into final conversation"
@@ -1,12 +1,45 @@
import logging
from datetime import datetime, timedelta, timezone
from typing import Optional

import prisma.types
from pydantic import BaseModel

from backend.data.db import query_raw_with_schema
from backend.util.json import SafeJson

logger = logging.getLogger(__name__)


class AccuracyAlertData(BaseModel):
    """Alert data when accuracy drops significantly."""

    graph_id: str
    user_id: Optional[str]
    drop_percent: float
    three_day_avg: float
    seven_day_avg: float
    detected_at: datetime


class AccuracyLatestData(BaseModel):
    """Latest execution accuracy data point."""

    date: datetime
    daily_score: Optional[float]
    three_day_avg: Optional[float]
    seven_day_avg: Optional[float]
    fourteen_day_avg: Optional[float]


class AccuracyTrendsResponse(BaseModel):
    """Response model for accuracy trends and alerts."""

    latest_data: AccuracyLatestData
    alert: Optional[AccuracyAlertData]
    historical_data: Optional[list[AccuracyLatestData]] = None


async def log_raw_analytics(
    user_id: str,
    type: str,
@@ -43,3 +76,217 @@ async def log_raw_metric(
    )

    return result


async def get_accuracy_trends_and_alerts(
    graph_id: str,
    days_back: int = 30,
    user_id: Optional[str] = None,
    drop_threshold: float = 10.0,
    include_historical: bool = False,
) -> AccuracyTrendsResponse:
    """Get accuracy trends and detect alerts for a specific graph."""
    query_template = """
    WITH daily_scores AS (
        SELECT
            DATE(e."createdAt") as execution_date,
            AVG(CASE
                WHEN e.stats IS NOT NULL
                    AND e.stats::json->>'correctness_score' IS NOT NULL
                    AND e.stats::json->>'correctness_score' != 'null'
                THEN (e.stats::json->>'correctness_score')::float * 100
                ELSE NULL
            END) as daily_score
        FROM {schema_prefix}"AgentGraphExecution" e
        WHERE e."agentGraphId" = $1::text
            AND e."isDeleted" = false
            AND e."createdAt" >= $2::timestamp
            AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
            {user_filter}
        GROUP BY DATE(e."createdAt")
        HAVING COUNT(*) >= 3 -- Need at least 3 executions per day
    ),
    trends AS (
        SELECT
            execution_date,
            daily_score,
            AVG(daily_score) OVER (
                ORDER BY execution_date
                ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
            ) as three_day_avg,
            AVG(daily_score) OVER (
                ORDER BY execution_date
                ROWS BETWEEN 6 PRECEDING AND CURRENT ROW
            ) as seven_day_avg,
            AVG(daily_score) OVER (
                ORDER BY execution_date
                ROWS BETWEEN 13 PRECEDING AND CURRENT ROW
            ) as fourteen_day_avg
        FROM daily_scores
    )
    SELECT *,
        CASE
            WHEN three_day_avg IS NOT NULL AND seven_day_avg IS NOT NULL AND seven_day_avg > 0
            THEN ((seven_day_avg - three_day_avg) / seven_day_avg * 100)
            ELSE NULL
        END as drop_percent
    FROM trends
    ORDER BY execution_date DESC
    {limit_clause}
    """

    start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
    params = [graph_id, start_date]
    user_filter = ""
    if user_id:
        user_filter = 'AND e."userId" = $3::text'
        params.append(user_id)

    # Determine limit clause
    limit_clause = "" if include_historical else "LIMIT 1"

    final_query = query_template.format(
        schema_prefix="{schema_prefix}",
        user_filter=user_filter,
        limit_clause=limit_clause,
    )

    result = await query_raw_with_schema(final_query, *params)

    if not result:
        return AccuracyTrendsResponse(
            latest_data=AccuracyLatestData(
                date=datetime.now(timezone.utc),
                daily_score=None,
                three_day_avg=None,
                seven_day_avg=None,
                fourteen_day_avg=None,
            ),
            alert=None,
        )

    latest = result[0]

    alert = None
    if (
        latest["drop_percent"] is not None
        and latest["drop_percent"] >= drop_threshold
        and latest["three_day_avg"] is not None
        and latest["seven_day_avg"] is not None
    ):
        alert = AccuracyAlertData(
            graph_id=graph_id,
            user_id=user_id,
            drop_percent=float(latest["drop_percent"]),
            three_day_avg=float(latest["three_day_avg"]),
            seven_day_avg=float(latest["seven_day_avg"]),
            detected_at=datetime.now(timezone.utc),
        )

    # Prepare historical data if requested
    historical_data = None
    if include_historical:
        historical_data = []
        for row in result:
            historical_data.append(
                AccuracyLatestData(
                    date=row["execution_date"],
                    daily_score=(
                        float(row["daily_score"])
                        if row["daily_score"] is not None
                        else None
                    ),
                    three_day_avg=(
                        float(row["three_day_avg"])
                        if row["three_day_avg"] is not None
                        else None
                    ),
                    seven_day_avg=(
                        float(row["seven_day_avg"])
                        if row["seven_day_avg"] is not None
                        else None
                    ),
                    fourteen_day_avg=(
                        float(row["fourteen_day_avg"])
                        if row["fourteen_day_avg"] is not None
                        else None
                    ),
                )
            )

    return AccuracyTrendsResponse(
        latest_data=AccuracyLatestData(
            date=latest["execution_date"],
            daily_score=(
                float(latest["daily_score"])
                if latest["daily_score"] is not None
                else None
            ),
            three_day_avg=(
                float(latest["three_day_avg"])
                if latest["three_day_avg"] is not None
                else None
            ),
            seven_day_avg=(
                float(latest["seven_day_avg"])
                if latest["seven_day_avg"] is not None
                else None
            ),
            fourteen_day_avg=(
                float(latest["fourteen_day_avg"])
                if latest["fourteen_day_avg"] is not None
                else None
            ),
        ),
        alert=alert,
        historical_data=historical_data,
    )
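# Worked example (not part of the diff): with seven_day_avg = 80.0 and
# three_day_avg = 60.0, drop_percent = (80.0 - 60.0) / 80.0 * 100 = 25.0,
# which clears the default drop_threshold of 10.0 and raises an alert.
seven_day_avg, three_day_avg = 80.0, 60.0
drop_percent = (seven_day_avg - three_day_avg) / seven_day_avg * 100
assert drop_percent == 25.0  # >= drop_threshold (10.0) -> AccuracyAlertData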


class MarketplaceGraphData(BaseModel):
    """Data structure for marketplace graph monitoring."""

    graph_id: str
    user_id: Optional[str]
    execution_count: int


async def get_marketplace_graphs_for_monitoring(
    days_back: int = 30,
    min_executions: int = 10,
) -> list[MarketplaceGraphData]:
    """Get published marketplace graphs with recent executions for monitoring."""
    query_template = """
    WITH marketplace_graphs AS (
        SELECT DISTINCT
            slv."agentGraphId" as graph_id,
            slv."agentGraphVersion" as graph_version
        FROM {schema_prefix}"StoreListing" sl
        JOIN {schema_prefix}"StoreListingVersion" slv ON sl."activeVersionId" = slv."id"
        WHERE sl."hasApprovedVersion" = true
            AND sl."isDeleted" = false
    )
    SELECT DISTINCT
        mg.graph_id,
        NULL as user_id, -- Marketplace graphs don't have a specific user_id for monitoring
        COUNT(*) as execution_count
    FROM marketplace_graphs mg
    JOIN {schema_prefix}"AgentGraphExecution" e ON e."agentGraphId" = mg.graph_id
    WHERE e."createdAt" >= $1::timestamp
        AND e."isDeleted" = false
        AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
    GROUP BY mg.graph_id
    HAVING COUNT(*) >= $2
    ORDER BY execution_count DESC
    """
    start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
    result = await query_raw_with_schema(query_template, start_date, min_executions)

    return [
        MarketplaceGraphData(
            graph_id=row["graph_id"],
            user_id=row["user_id"],
            execution_count=int(row["execution_count"]),
        )
        for row in result
    ]

@@ -266,14 +266,61 @@ class BlockSchema(BaseModel):
            )
        }

    @classmethod
    def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
        """
        Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).

        Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}

        Raises:
            ValueError: If multiple fields have the same kwarg_name, as this would
                cause silent overwriting and only the last field would be processed.
        """
        result: dict[str, dict[str, Any]] = {}
        schema = cls.jsonschema()
        properties = schema.get("properties", {})

        for field_name, field_schema in properties.items():
            auto_creds = field_schema.get("auto_credentials")
            if auto_creds:
                kwarg_name = auto_creds.get("kwarg_name", "credentials")
                if kwarg_name in result:
                    raise ValueError(
                        f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
                        f"in fields '{result[kwarg_name]['field_name']}' and "
                        f"'{field_name}' on {cls.__qualname__}"
                    )
                result[kwarg_name] = {
                    "field_name": field_name,
                    "config": auto_creds,
                }
        return result
|
||||
|
||||
@classmethod
|
||||
def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
|
||||
return {
|
||||
field_name: CredentialsFieldInfo.model_validate(
|
||||
result = {}
|
||||
|
||||
# Regular credentials fields
|
||||
for field_name in cls.get_credentials_fields().keys():
|
||||
result[field_name] = CredentialsFieldInfo.model_validate(
|
||||
cls.get_field_schema(field_name), by_alias=True
|
||||
)
|
||||
for field_name in cls.get_credentials_fields().keys()
|
||||
}
|
||||
|
||||
# Auto-generated credentials fields (from GoogleDriveFileInput etc.)
|
||||
for kwarg_name, info in cls.get_auto_credentials_fields().items():
|
||||
config = info["config"]
|
||||
# Build a schema-like dict that CredentialsFieldInfo can parse
|
||||
auto_schema = {
|
||||
"credentials_provider": [config.get("provider", "google")],
|
||||
"credentials_types": [config.get("type", "oauth2")],
|
||||
"credentials_scopes": config.get("scopes"),
|
||||
}
|
||||
result[kwarg_name] = CredentialsFieldInfo.model_validate(
|
||||
auto_schema, by_alias=True
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
|
||||
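A rough sketch of the shape get_auto_credentials_fields expects. The field schema below is hypothetical and only illustrates the auto_credentials metadata the method reads:

# Hypothetical JSON-schema fragment for a field like GoogleDriveFileInput.
properties = {
    "document": {
        "auto_credentials": {
            "kwarg_name": "credentials",
            "provider": "google",
            "type": "oauth2",
            "scopes": ["https://www.googleapis.com/auth/drive.readonly"],
        }
    }
}

# Mirrors the loop in get_auto_credentials_fields: one entry per kwarg_name,
# with a ValueError on collisions instead of silent overwriting.
result = {}
for field_name, field_schema in properties.items():
    auto_creds = field_schema.get("auto_credentials")
    if auto_creds:
        kwarg_name = auto_creds.get("kwarg_name", "credentials")
        if kwarg_name in result:
            raise ValueError(f"Duplicate auto_credentials kwarg_name {kwarg_name!r}")
        result[kwarg_name] = {"field_name": field_name, "config": auto_creds}

assert result["credentials"]["field_name"] == "document"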
@@ -554,14 +601,18 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
            async for output_name, output_data in self._execute(input_data, **kwargs):
                yield output_name, output_data
        except Exception as ex:
            if not isinstance(ex, BlockError):
                raise BlockUnknownError(
            if isinstance(ex, BlockError):
                raise ex
            else:
                raise (
                    BlockExecutionError
                    if isinstance(ex, ValueError)
                    else BlockUnknownError
                )(
                    message=str(ex),
                    block_name=self.name,
                    block_id=self.id,
                ) from ex
            else:
                raise ex

    async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
        if error := self.input_schema.validate_data(input_data):
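The new mapping routes ValueError to BlockExecutionError and everything else to BlockUnknownError, while known BlockErrors pass through untouched. A standalone sketch of the same selection pattern; the exception classes here are simplified stand-ins (the real ones take message=, block_name=, block_id=):

class BlockError(Exception): ...
class BlockExecutionError(BlockError): ...
class BlockUnknownError(BlockError): ...

def wrap(ex: Exception) -> BlockError:
    # Known block errors pass through; ValueError is treated as an
    # execution error, anything else as unknown.
    if isinstance(ex, BlockError):
        return ex
    return (BlockExecutionError if isinstance(ex, ValueError) else BlockUnknownError)(str(ex))

assert type(wrap(ValueError("bad input"))) is BlockExecutionError
assert type(wrap(RuntimeError("boom"))) is BlockUnknownError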
@@ -5,6 +5,7 @@ from enum import Enum
from multiprocessing import Manager
from queue import Empty
from typing import (
    TYPE_CHECKING,
    Annotated,
    Any,
    AsyncGenerator,

@@ -65,6 +66,9 @@ from .includes import (
)
from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats

if TYPE_CHECKING:
    pass

T = TypeVar("T")

logger = logging.getLogger(__name__)
@@ -836,6 +840,30 @@ async def upsert_execution_output(
    await AgentNodeExecutionInputOutput.prisma().create(data=data)


async def get_execution_outputs_by_node_exec_id(
    node_exec_id: str,
) -> dict[str, Any]:
    """
    Get all execution outputs for a specific node execution ID.

    Args:
        node_exec_id: The node execution ID to get outputs for

    Returns:
        Dictionary mapping output names to their data values
    """
    outputs = await AgentNodeExecutionInputOutput.prisma().find_many(
        where={"referencedByOutputExecId": node_exec_id}
    )

    result = {}
    for output in outputs:
        if output.data is not None:
            result[output.name] = type_utils.convert(output.data, JsonValue)

    return result
|
||||
graph_exec_id: str,
|
||||
) -> GraphExecution | None:
|
||||
@@ -1465,3 +1493,35 @@ async def get_graph_execution_by_share_token(
|
||||
created_at=execution.createdAt,
|
||||
outputs=outputs,
|
||||
)
|
||||
|
||||
|
||||
async def get_frequently_executed_graphs(
|
||||
days_back: int = 30,
|
||||
min_executions: int = 10,
|
||||
) -> list[dict]:
|
||||
"""Get graphs that have been frequently executed for monitoring."""
|
||||
query_template = """
|
||||
SELECT DISTINCT
|
||||
e."agentGraphId" as graph_id,
|
||||
e."userId" as user_id,
|
||||
COUNT(*) as execution_count
|
||||
FROM {schema_prefix}"AgentGraphExecution" e
|
||||
WHERE e."createdAt" >= $1::timestamp
|
||||
AND e."isDeleted" = false
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
GROUP BY e."agentGraphId", e."userId"
|
||||
HAVING COUNT(*) >= $2
|
||||
ORDER BY execution_count DESC
|
||||
"""
|
||||
|
||||
start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
result = await query_raw_with_schema(query_template, start_date, min_executions)
|
||||
|
||||
return [
|
||||
{
|
||||
"graph_id": row["graph_id"],
|
||||
"user_id": row["user_id"],
|
||||
"execution_count": int(row["execution_count"]),
|
||||
}
|
||||
for row in result
|
||||
]
|
||||
|
||||
@@ -100,7 +100,7 @@ async def get_or_create_human_review(
        return None
    else:
        return ReviewResult(
            data=review.payload if review.status == ReviewStatus.APPROVED else None,
            data=review.payload,
            status=review.status,
            message=review.reviewMessage or "",
            processed=review.processed,
@@ -22,7 +22,7 @@ from typing import (
from urllib.parse import urlparse
from uuid import uuid4

from prisma.enums import CreditTransactionType
from prisma.enums import CreditTransactionType, OnboardingStep
from pydantic import (
    BaseModel,
    ConfigDict,

@@ -868,3 +868,20 @@ class UserExecutionSummaryStats(BaseModel):
    total_execution_time: float = Field(default=0)
    average_execution_time: float = Field(default=0)
    cost_breakdown: dict[str, float] = Field(default_factory=dict)


class UserOnboarding(BaseModel):
    userId: str
    completedSteps: list[OnboardingStep]
    walletShown: bool
    notified: list[OnboardingStep]
    rewardedFor: list[OnboardingStep]
    usageReason: Optional[str]
    integrations: list[str]
    otherIntegrations: Optional[str]
    selectedStoreListingVersionId: Optional[str]
    agentInput: Optional[dict[str, Any]]
    onboardingAgentExecutionId: Optional[str]
    agentRuns: int
    lastRunAt: Optional[datetime]
    consecutiveRunDays: int
@@ -2,7 +2,7 @@ from __future__ import annotations

from typing import AsyncGenerator

from pydantic import BaseModel
from pydantic import BaseModel, field_serializer

from backend.data.event_bus import AsyncRedisEventBus
from backend.server.model import NotificationPayload

@@ -15,6 +15,11 @@ class NotificationEvent(BaseModel):
    user_id: str
    payload: NotificationPayload

    @field_serializer("payload")
    def serialize_payload(self, payload: NotificationPayload):
        """Ensure extra fields survive Redis serialization."""
        return payload.model_dump()


class AsyncRedisNotificationEventBus(AsyncRedisEventBus[NotificationEvent]):
    Model = NotificationEvent  # type: ignore
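Why the serializer matters: pydantic v2 serializes a nested field by its declared type, so a subclass payload (such as OnboardingNotificationPayload) would otherwise lose its extra fields when the event is dumped for Redis. A minimal sketch of the gotcha, with stand-in model names:

import pydantic

class Payload(pydantic.BaseModel):
    type: str
    event: str
    model_config = pydantic.ConfigDict(extra="allow")

class OnboardingPayload(Payload):
    step: str | None = None

class Event(pydantic.BaseModel):
    user_id: str
    payload: Payload  # declared as the base type

    @pydantic.field_serializer("payload")
    def serialize_payload(self, payload: Payload):
        # Delegates to the instance's own dump, so subclass fields survive.
        return payload.model_dump()

e = Event(
    user_id="u1",
    payload=OnboardingPayload(type="onboarding", event="step_completed", step="WELCOME"),
)
assert e.model_dump()["payload"]["step"] == "WELCOME"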
@@ -1,6 +1,7 @@
import re
from datetime import datetime
from typing import Any, Optional
from datetime import datetime, timedelta, timezone
from typing import Any, Literal, Optional
from zoneinfo import ZoneInfo

import prisma
import pydantic

@@ -8,17 +9,18 @@ from prisma.enums import OnboardingStep
from prisma.models import UserOnboarding
from prisma.types import UserOnboardingCreateInput, UserOnboardingUpdateInput

from backend.data.block import get_blocks
from backend.data import execution as execution_db
from backend.data.credit import get_user_credit_model
from backend.data.model import CredentialsMetaInput
from backend.data.notification_bus import (
    AsyncRedisNotificationEventBus,
    NotificationEvent,
)
from backend.data.user import get_user_by_id
from backend.server.model import OnboardingNotificationPayload
from backend.server.v2.store.model import StoreAgentDetails
from backend.util.cache import cached
from backend.util.json import SafeJson
from backend.util.timezone_utils import get_user_timezone_or_utc

# Mapping from user reason id to categories to search for when choosing agent to show
REASON_MAPPING: dict[str, list[str]] = {

@@ -31,9 +33,20 @@ REASON_MAPPING: dict[str, list[str]] = {
POINTS_AGENT_COUNT = 50  # Number of agents to calculate points for
MIN_AGENT_COUNT = 2  # Minimum number of marketplace agents to enable onboarding

FrontendOnboardingStep = Literal[
    OnboardingStep.WELCOME,
    OnboardingStep.USAGE_REASON,
    OnboardingStep.INTEGRATIONS,
    OnboardingStep.AGENT_CHOICE,
    OnboardingStep.AGENT_NEW_RUN,
    OnboardingStep.AGENT_INPUT,
    OnboardingStep.CONGRATS,
    OnboardingStep.MARKETPLACE_VISIT,
    OnboardingStep.BUILDER_OPEN,
]


class UserOnboardingUpdate(pydantic.BaseModel):
    completedSteps: Optional[list[OnboardingStep]] = None
    walletShown: Optional[bool] = None
    notified: Optional[list[OnboardingStep]] = None
    usageReason: Optional[str] = None

@@ -42,9 +55,6 @@ class UserOnboardingUpdate(pydantic.BaseModel):
    selectedStoreListingVersionId: Optional[str] = None
    agentInput: Optional[dict[str, Any]] = None
    onboardingAgentExecutionId: Optional[str] = None
    agentRuns: Optional[int] = None
    lastRunAt: Optional[datetime] = None
    consecutiveRunDays: Optional[int] = None


async def get_user_onboarding(user_id: str):

@@ -83,14 +93,6 @@ async def reset_user_onboarding(user_id: str):
async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
    update: UserOnboardingUpdateInput = {}
    onboarding = await get_user_onboarding(user_id)
    if data.completedSteps is not None:
        update["completedSteps"] = list(
            set(data.completedSteps + onboarding.completedSteps)
        )
        for step in data.completedSteps:
            if step not in onboarding.completedSteps:
                await _reward_user(user_id, onboarding, step)
                await _send_onboarding_notification(user_id, step)
    if data.walletShown:
        update["walletShown"] = data.walletShown
    if data.notified is not None:

@@ -107,12 +109,6 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
        update["agentInput"] = SafeJson(data.agentInput)
    if data.onboardingAgentExecutionId is not None:
        update["onboardingAgentExecutionId"] = data.onboardingAgentExecutionId
    if data.agentRuns is not None and data.agentRuns > onboarding.agentRuns:
        update["agentRuns"] = data.agentRuns
    if data.lastRunAt is not None:
        update["lastRunAt"] = data.lastRunAt
    if data.consecutiveRunDays is not None:
        update["consecutiveRunDays"] = data.consecutiveRunDays

    return await UserOnboarding.prisma().upsert(
        where={"userId": user_id},

@@ -161,14 +157,12 @@ async def _reward_user(user_id: str, onboarding: UserOnboarding, step: Onboardin
    if step in onboarding.rewardedFor:
        return

    onboarding.rewardedFor.append(step)
    user_credit_model = await get_user_credit_model(user_id)
    await user_credit_model.onboarding_reward(user_id, reward, step)
    await UserOnboarding.prisma().update(
        where={"userId": user_id},
        data={
            "completedSteps": list(set(onboarding.completedSteps + [step])),
            "rewardedFor": onboarding.rewardedFor,
            "rewardedFor": list(set(onboarding.rewardedFor + [step])),
        },
    )

@@ -177,31 +171,52 @@ async def complete_onboarding_step(user_id: str, step: OnboardingStep):
    """
    Completes the specified onboarding step for the user if not already completed.
    """

    onboarding = await get_user_onboarding(user_id)
    if step not in onboarding.completedSteps:
        await update_user_onboarding(
            user_id,
            UserOnboardingUpdate(completedSteps=onboarding.completedSteps + [step]),
        await UserOnboarding.prisma().update(
            where={"userId": user_id},
            data={
                "completedSteps": list(set(onboarding.completedSteps + [step])),
            },
        )
        await _reward_user(user_id, onboarding, step)
        await _send_onboarding_notification(user_id, step)


async def _send_onboarding_notification(user_id: str, step: OnboardingStep):
async def _send_onboarding_notification(
    user_id: str, step: OnboardingStep | None, event: str = "step_completed"
):
    """
    Sends an onboarding notification to the user for the specified step.
    Sends an onboarding notification to the user.
    """
    payload = OnboardingNotificationPayload(
        type="onboarding",
        event="step_completed",
        step=step.value,
        event=event,
        step=step,
    )
    await AsyncRedisNotificationEventBus().publish(
        NotificationEvent(user_id=user_id, payload=payload)
    )


def clean_and_split(text: str) -> list[str]:
async def complete_re_run_agent(user_id: str, graph_id: str) -> None:
    """
    Complete RE_RUN_AGENT step when a user runs a graph they've run before.
    Keeps overhead low by only counting executions if the step is still pending.
    """
    onboarding = await get_user_onboarding(user_id)
    if OnboardingStep.RE_RUN_AGENT in onboarding.completedSteps:
        return

    # Includes current execution, so count > 1 means there was at least one prior run.
    previous_exec_count = await execution_db.get_graph_executions_count(
        user_id=user_id, graph_id=graph_id
    )
    if previous_exec_count > 1:
        await complete_onboarding_step(user_id, OnboardingStep.RE_RUN_AGENT)


def _clean_and_split(text: str) -> list[str]:
    """
    Removes all special characters from a string, truncates it to 100 characters,
    and splits it by whitespace and commas.

@@ -224,7 +239,7 @@ def clean_and_split(text: str) -> list[str]:
    return words


def calculate_points(
def _calculate_points(
    agent, categories: list[str], custom: list[str], integrations: list[str]
) -> int:
    """

@@ -268,18 +283,85 @@ def calculate_points(
    return int(points)


def get_credentials_blocks() -> dict[str, str]:
    # Returns a dictionary of block id to credentials field name
    creds: dict[str, str] = {}
    blocks = get_blocks()
    for id, block in blocks.items():
        for field_name, field_info in block().input_schema.model_fields.items():
            if field_info.annotation == CredentialsMetaInput:
                creds[id] = field_name
    return creds
def _normalize_datetime(value: datetime | None) -> datetime | None:
    if value is None:
        return None
    if value.tzinfo is None:
        return value.replace(tzinfo=timezone.utc)
    return value.astimezone(timezone.utc)


CREDENTIALS_FIELDS: dict[str, str] = get_credentials_blocks()
def _calculate_consecutive_run_days(
    last_run_at: datetime | None, current_consecutive_days: int, user_timezone: str
) -> tuple[datetime, int]:
    tz = ZoneInfo(user_timezone)
    local_now = datetime.now(tz)
    normalized_last_run = _normalize_datetime(last_run_at)

    if normalized_last_run is None:
        return local_now.astimezone(timezone.utc), 1

    last_run_local = normalized_last_run.astimezone(tz)
    last_run_date = last_run_local.date()
    today = local_now.date()

    if last_run_date == today:
        return local_now.astimezone(timezone.utc), current_consecutive_days

    if last_run_date == today - timedelta(days=1):
        return local_now.astimezone(timezone.utc), current_consecutive_days + 1

    return local_now.astimezone(timezone.utc), 1
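A worked example of the three branches, under the assumption that the user's timezone is UTC: a run "yesterday" (in local terms) extends the streak, a run "today" leaves it unchanged, and anything older (or no prior run) resets it to 1:

from datetime import datetime, timedelta, timezone

now_utc = datetime.now(timezone.utc)
_, streak = _calculate_consecutive_run_days(
    last_run_at=now_utc - timedelta(days=1),  # yesterday's local date in UTC
    current_consecutive_days=4,
    user_timezone="UTC",
)
assert streak == 5  # yesterday -> one more consecutive day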
def _get_run_milestone_steps(
    new_run_count: int, consecutive_days: int
) -> list[OnboardingStep]:
    milestones: list[OnboardingStep] = []
    if new_run_count >= 10:
        milestones.append(OnboardingStep.RUN_AGENTS)
    if new_run_count >= 100:
        milestones.append(OnboardingStep.RUN_AGENTS_100)
    if consecutive_days >= 3:
        milestones.append(OnboardingStep.RUN_3_DAYS)
    if consecutive_days >= 14:
        milestones.append(OnboardingStep.RUN_14_DAYS)
    return milestones


async def _get_user_timezone(user_id: str) -> str:
    user = await get_user_by_id(user_id)
    return get_user_timezone_or_utc(user.timezone if user else None)


async def increment_runs(user_id: str):
    """
    Increment a user's run counters and trigger any onboarding milestones.
    """
    user_timezone = await _get_user_timezone(user_id)
    onboarding = await get_user_onboarding(user_id)
    new_run_count = onboarding.agentRuns + 1
    last_run_at, consecutive_run_days = _calculate_consecutive_run_days(
        onboarding.lastRunAt, onboarding.consecutiveRunDays, user_timezone
    )

    await UserOnboarding.prisma().update(
        where={"userId": user_id},
        data={
            "agentRuns": {"increment": 1},
            "lastRunAt": last_run_at,
            "consecutiveRunDays": consecutive_run_days,
        },
    )

    milestones = _get_run_milestone_steps(new_run_count, consecutive_run_days)
    new_steps = [step for step in milestones if step not in onboarding.completedSteps]

    for step in new_steps:
        await complete_onboarding_step(user_id, step)
    # Send progress notification if no steps were completed, so client refetches onboarding state
    if not new_steps:
        await _send_onboarding_notification(user_id, None, event="increment_runs")


async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:

@@ -288,7 +370,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:

    where_clause: dict[str, Any] = {}

    custom = clean_and_split((user_onboarding.usageReason or "").lower())
    custom = _clean_and_split((user_onboarding.usageReason or "").lower())

    if categories:
        where_clause["OR"] = [

@@ -336,7 +418,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
    # Calculate points for the first X agents and choose the top 2
    agent_points = []
    for agent in storeAgents[:POINTS_AGENT_COUNT]:
        points = calculate_points(
        points = _calculate_points(
            agent, categories, custom, user_onboarding.integrations
        )
        agent_points.append((agent, points))

@@ -350,6 +432,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
            slug=agent.slug,
            agent_name=agent.agent_name,
            agent_video=agent.agent_video or "",
            agent_output_demo=agent.agent_output_demo or "",
            agent_image=agent.agent_image,
            creator=agent.creator_username,
            creator_avatar=agent.creator_avatar,
|
||||
from typing import TYPE_CHECKING, Callable, Concatenate, ParamSpec, TypeVar, cast
|
||||
|
||||
from backend.data import db
|
||||
from backend.data.analytics import (
|
||||
get_accuracy_trends_and_alerts,
|
||||
get_marketplace_graphs_for_monitoring,
|
||||
)
|
||||
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
|
||||
from backend.data.execution import (
|
||||
create_graph_execution,
|
||||
get_block_error_stats,
|
||||
get_child_graph_executions,
|
||||
get_execution_kv_data,
|
||||
get_execution_outputs_by_node_exec_id,
|
||||
get_frequently_executed_graphs,
|
||||
get_graph_execution_meta,
|
||||
get_graph_executions,
|
||||
get_graph_executions_count,
|
||||
@@ -142,9 +148,13 @@ class DatabaseManager(AppService):
|
||||
update_graph_execution_stats = _(update_graph_execution_stats)
|
||||
upsert_execution_input = _(upsert_execution_input)
|
||||
upsert_execution_output = _(upsert_execution_output)
|
||||
get_execution_outputs_by_node_exec_id = _(get_execution_outputs_by_node_exec_id)
|
||||
get_execution_kv_data = _(get_execution_kv_data)
|
||||
set_execution_kv_data = _(set_execution_kv_data)
|
||||
get_block_error_stats = _(get_block_error_stats)
|
||||
get_accuracy_trends_and_alerts = _(get_accuracy_trends_and_alerts)
|
||||
get_frequently_executed_graphs = _(get_frequently_executed_graphs)
|
||||
get_marketplace_graphs_for_monitoring = _(get_marketplace_graphs_for_monitoring)
|
||||
|
||||
# Graphs
|
||||
get_node = _(get_node)
|
||||
@@ -226,6 +236,10 @@ class DatabaseManagerClient(AppServiceClient):
|
||||
|
||||
# Block error monitoring
|
||||
get_block_error_stats = _(d.get_block_error_stats)
|
||||
# Execution accuracy monitoring
|
||||
get_accuracy_trends_and_alerts = _(d.get_accuracy_trends_and_alerts)
|
||||
get_frequently_executed_graphs = _(d.get_frequently_executed_graphs)
|
||||
get_marketplace_graphs_for_monitoring = _(d.get_marketplace_graphs_for_monitoring)
|
||||
|
||||
# Human In The Loop
|
||||
has_pending_reviews_for_graph_exec = _(d.has_pending_reviews_for_graph_exec)
|
||||
@@ -265,6 +279,7 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
get_user_integrations = d.get_user_integrations
|
||||
upsert_execution_input = d.upsert_execution_input
|
||||
upsert_execution_output = d.upsert_execution_output
|
||||
get_execution_outputs_by_node_exec_id = d.get_execution_outputs_by_node_exec_id
|
||||
update_graph_execution_stats = d.update_graph_execution_stats
|
||||
update_node_execution_status = d.update_node_execution_status
|
||||
update_node_execution_status_batch = d.update_node_execution_status_batch
|
||||
|
||||
@@ -133,9 +133,8 @@ def execute_graph(
    cluster_lock: ClusterLock,
):
    """Execute graph using thread-local ExecutionProcessor instance"""
    return _tls.processor.on_graph_execution(
        graph_exec_entry, cancel_event, cluster_lock
    )
    processor: ExecutionProcessor = _tls.processor
    return processor.on_graph_execution(graph_exec_entry, cancel_event, cluster_lock)


T = TypeVar("T")

@@ -143,8 +142,8 @@ T = TypeVar("T")

async def execute_node(
    node: Node,
    creds_manager: IntegrationCredentialsManager,
    data: NodeExecutionEntry,
    execution_processor: "ExecutionProcessor",
    execution_stats: NodeExecutionStats | None = None,
    nodes_input_masks: Optional[NodesInputMasks] = None,
) -> BlockOutput:

@@ -169,6 +168,7 @@
    node_id = data.node_id
    node_block = node.block
    execution_context = data.execution_context
    creds_manager = execution_processor.creds_manager

    log_metadata = LogMetadata(
        logger=_logger,

@@ -212,21 +212,60 @@ async def execute_node(
        "node_exec_id": node_exec_id,
        "user_id": user_id,
        "execution_context": execution_context,
        "execution_processor": execution_processor,
    }

    # Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
    # changes during execution. ⚠️ This means a set of credentials can only be used by
    # one (running) block at a time; simultaneous execution of blocks using same
    # credentials is not supported.
    creds_lock = None
    creds_locks: list[AsyncRedisLock] = []
    input_model = cast(type[BlockSchema], node_block.input_schema)

    # Handle regular credentials fields
    for field_name, input_type in input_model.get_credentials_fields().items():
        credentials_meta = input_type(**input_data[field_name])
        credentials, creds_lock = await creds_manager.acquire(
            user_id, credentials_meta.id
        )
        credentials, lock = await creds_manager.acquire(user_id, credentials_meta.id)
        creds_locks.append(lock)
        extra_exec_kwargs[field_name] = credentials

    # Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
    for kwarg_name, info in input_model.get_auto_credentials_fields().items():
        field_name = info["field_name"]
        field_data = input_data.get(field_name)
        if field_data and isinstance(field_data, dict):
            # Check if _credentials_id key exists in the field data
            if "_credentials_id" in field_data:
                cred_id = field_data["_credentials_id"]
                if cred_id:
                    # Credential ID provided - acquire credentials
                    provider = info.get("config", {}).get(
                        "provider", "external service"
                    )
                    file_name = field_data.get("name", "selected file")
                    try:
                        credentials, lock = await creds_manager.acquire(
                            user_id, cred_id
                        )
                        creds_locks.append(lock)
                        extra_exec_kwargs[kwarg_name] = credentials
                    except ValueError:
                        # Credential was deleted or doesn't exist
                        raise ValueError(
                            f"Authentication expired for '{file_name}' in field '{field_name}'. "
                            f"The saved {provider.capitalize()} credentials no longer exist. "
                            f"Please re-select the file to re-authenticate."
                        )
                # else: _credentials_id is explicitly None, skip credentials (for chained data)
            else:
                # _credentials_id key missing entirely - this is an error
                provider = info.get("config", {}).get("provider", "external service")
                file_name = field_data.get("name", "selected file")
                raise ValueError(
                    f"Authentication missing for '{file_name}' in field '{field_name}'. "
                    f"Please re-select the file to authenticate with {provider.capitalize()}."
                )

    output_size = 0

    # sentry tracking nonsense to get user counts for blocks because isolation scopes don't work :(

@@ -260,12 +299,17 @@ async def execute_node(
        # Re-raise to maintain normal error flow
        raise
    finally:
        # Ensure credentials are released even if execution fails
        if creds_lock and (await creds_lock.locked()) and (await creds_lock.owned()):
            try:
                await creds_lock.release()
            except Exception as e:
                log_metadata.error(f"Failed to release credentials lock: {e}")
        # Ensure all credentials are released even if execution fails
        for creds_lock in creds_locks:
            if (
                creds_lock
                and (await creds_lock.locked())
                and (await creds_lock.owned())
            ):
                try:
                    await creds_lock.release()
                except Exception as e:
                    log_metadata.error(f"Failed to release credentials lock: {e}")

        # Update execution stats
        if execution_stats is not None:
|
||||
|
||||
async for output_name, output_data in execute_node(
|
||||
node=node,
|
||||
creds_manager=self.creds_manager,
|
||||
data=node_exec,
|
||||
execution_processor=self,
|
||||
execution_stats=stats,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
):
|
||||
@@ -817,12 +861,17 @@ class ExecutionProcessor:
|
||||
execution_stats_lock = threading.Lock()
|
||||
|
||||
# State holders ----------------------------------------------------
|
||||
running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
|
||||
self.running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
|
||||
NodeExecutionProgress
|
||||
)
|
||||
running_node_evaluation: dict[str, Future] = {}
|
||||
self.running_node_evaluation: dict[str, Future] = {}
|
||||
self.execution_stats = execution_stats
|
||||
self.execution_stats_lock = execution_stats_lock
|
||||
execution_queue = ExecutionQueue[NodeExecutionEntry]()
|
||||
|
||||
running_node_execution = self.running_node_execution
|
||||
running_node_evaluation = self.running_node_evaluation
|
||||
|
||||
try:
|
||||
if db_client.get_credits(graph_exec.user_id) <= 0:
|
||||
raise InsufficientBalanceError(
|
||||
|
||||
@@ -26,12 +26,14 @@ from sqlalchemy import MetaData, create_engine
|
||||
from backend.data.block import BlockInput
|
||||
from backend.data.execution import GraphExecutionWithNodes
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.data.onboarding import increment_runs
|
||||
from backend.executor import utils as execution_utils
|
||||
from backend.monitoring import (
|
||||
NotificationJobArgs,
|
||||
process_existing_batches,
|
||||
process_weekly_summary,
|
||||
report_block_error_rates,
|
||||
report_execution_accuracy_alerts,
|
||||
report_late_executions,
|
||||
)
|
||||
from backend.util.clients import get_scheduler_client
|
||||
@@ -153,6 +155,7 @@ async def _execute_graph(**kwargs):
|
||||
inputs=args.input_data,
|
||||
graph_credentials_inputs=args.input_credentials,
|
||||
)
|
||||
await increment_runs(args.user_id)
|
||||
elapsed = asyncio.get_event_loop().time() - start_time
|
||||
logger.info(
|
||||
f"Graph execution started with ID {graph_exec.id} for graph {args.graph_id} "
|
||||
@@ -239,6 +242,11 @@ def cleanup_expired_files():
|
||||
run_async(cleanup_expired_files_async())
|
||||
|
||||
|
||||
def execution_accuracy_alerts():
|
||||
"""Check execution accuracy and send alerts if drops are detected."""
|
||||
return report_execution_accuracy_alerts()
|
||||
|
||||
|
||||
# Monitoring functions are now imported from monitoring module
|
||||
|
||||
|
||||
@@ -438,6 +446,17 @@ class Scheduler(AppService):
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
)
|
||||
|
||||
# Execution Accuracy Monitoring - configurable interval
|
||||
self.scheduler.add_job(
|
||||
execution_accuracy_alerts,
|
||||
id="report_execution_accuracy_alerts",
|
||||
trigger="interval",
|
||||
replace_existing=True,
|
||||
seconds=config.execution_accuracy_check_interval_hours
|
||||
* 3600, # Convert hours to seconds
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
)
|
||||
|
||||
self.scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
|
||||
self.scheduler.add_listener(job_missed_listener, EVENT_JOB_MISSED)
|
||||
self.scheduler.add_listener(job_max_instances_listener, EVENT_JOB_MAX_INSTANCES)
|
||||
@@ -585,6 +604,11 @@ class Scheduler(AppService):
|
||||
"""Manually trigger cleanup of expired cloud storage files."""
|
||||
return cleanup_expired_files()
|
||||
|
||||
@expose
|
||||
def execute_report_execution_accuracy_alerts(self):
|
||||
"""Manually trigger execution accuracy alert checking."""
|
||||
return execution_accuracy_alerts()
|
||||
|
||||
|
||||
class SchedulerClient(AppServiceClient):
|
||||
@classmethod
|
||||
|
||||
@@ -18,7 +18,9 @@ class ManualWebhookManagerBase(BaseWebhooksManager[WT]):
        ingress_url: str,
        secret: str,
    ) -> tuple[str, dict]:
        print(ingress_url)  # FIXME: pass URL to user in front end
        # TODO: pass ingress_url to user in frontend
        # See: https://github.com/Significant-Gravitas/AutoGPT/issues/8537
        logger.debug(f"Manual webhook registered with ingress URL: {ingress_url}")

        return "", {}
@@ -1,5 +1,6 @@
"""Monitoring module for platform health and alerting."""

from .accuracy_monitor import AccuracyMonitor, report_execution_accuracy_alerts
from .block_error_monitor import BlockErrorMonitor, report_block_error_rates
from .late_execution_monitor import (
    LateExecutionException,

@@ -13,10 +14,12 @@ from .notification_monitor import (
)

__all__ = [
    "AccuracyMonitor",
    "BlockErrorMonitor",
    "LateExecutionMonitor",
    "LateExecutionException",
    "NotificationJobArgs",
    "report_execution_accuracy_alerts",
    "report_block_error_rates",
    "report_late_executions",
    "process_existing_batches",

107
autogpt_platform/backend/backend/monitoring/accuracy_monitor.py
Normal file
@@ -0,0 +1,107 @@
"""Execution accuracy monitoring module."""

import logging

from backend.util.clients import (
    get_database_manager_client,
    get_notification_manager_client,
)
from backend.util.metrics import DiscordChannel, sentry_capture_error
from backend.util.settings import Config

logger = logging.getLogger(__name__)
config = Config()


class AccuracyMonitor:
    """Monitor execution accuracy trends and send alerts for drops."""

    def __init__(self, drop_threshold: float = 10.0):
        self.config = config
        self.notification_client = get_notification_manager_client()
        self.database_client = get_database_manager_client()
        self.drop_threshold = drop_threshold

    def check_execution_accuracy_alerts(self) -> str:
        """Check marketplace agents for accuracy drops and send alerts."""
        try:
            logger.info("Checking execution accuracy for marketplace agents")

            # Get marketplace graphs using database client
            graphs = self.database_client.get_marketplace_graphs_for_monitoring(
                days_back=30, min_executions=10
            )

            alerts_found = 0

            for graph_data in graphs:
                result = self.database_client.get_accuracy_trends_and_alerts(
                    graph_id=graph_data.graph_id,
                    user_id=graph_data.user_id,
                    days_back=21,  # 3 weeks
                    drop_threshold=self.drop_threshold,
                )

                if result.alert:
                    alert = result.alert

                    # Get graph details for better alert info
                    try:
                        graph_info = self.database_client.get_graph_metadata(
                            graph_id=alert.graph_id
                        )
                        graph_name = graph_info.name if graph_info else "Unknown Agent"
                    except Exception:
                        graph_name = "Unknown Agent"

                    # Create detailed alert message
                    alert_msg = (
                        f"🚨 **AGENT ACCURACY DROP DETECTED**\n\n"
                        f"**Agent:** {graph_name}\n"
                        f"**Graph ID:** `{alert.graph_id}`\n"
                        f"**Accuracy Drop:** {alert.drop_percent:.1f}%\n"
                        f"**Recent Performance:**\n"
                        f" • 3-day average: {alert.three_day_avg:.1f}%\n"
                        f" • 7-day average: {alert.seven_day_avg:.1f}%\n"
                    )

                    if alert.user_id:
                        alert_msg += f"**Owner:** {alert.user_id}\n"

                    # Send individual alert for each agent (not batched)
                    self.notification_client.discord_system_alert(
                        alert_msg, DiscordChannel.PRODUCT
                    )
                    alerts_found += 1
                    logger.warning(
                        f"Sent accuracy alert for agent: {graph_name} ({alert.graph_id})"
                    )

            if alerts_found > 0:
                return f"Alert sent for {alerts_found} agents with accuracy drops"

            logger.info("No execution accuracy alerts detected")
            return "No accuracy alerts detected"

        except Exception as e:
            logger.exception(f"Error checking execution accuracy alerts: {e}")

            error = Exception(f"Error checking execution accuracy alerts: {e}")
            msg = str(error)
            sentry_capture_error(error)
            self.notification_client.discord_system_alert(msg, DiscordChannel.PRODUCT)
            return msg


def report_execution_accuracy_alerts(drop_threshold: float = 10.0) -> str:
    """
    Check execution accuracy and send alerts if drops are detected.

    Args:
        drop_threshold: Percentage drop threshold to trigger alerts (default 10.0%)

    Returns:
        Status message indicating results of the check
    """
    monitor = AccuracyMonitor(drop_threshold=drop_threshold)
    return monitor.check_execution_accuracy_alerts()
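A hypothetical manual invocation, e.g. from a maintenance shell. Note that a larger threshold is less sensitive: 15.0 alerts only on drops bigger than the 10% default used by the scheduled job:

status = report_execution_accuracy_alerts(drop_threshold=15.0)
print(status)  # "No accuracy alerts detected" or a per-agent summary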
@@ -143,6 +143,9 @@ def instrument_fastapi(
    )

    # Create instrumentator with default metrics
    # Use service-specific inprogress_name to avoid duplicate registration
    # when multiple FastAPI apps are instrumented in the same process
    service_subsystem = service_name.replace("-", "_")
    instrumentator = Instrumentator(
        should_group_status_codes=True,
        should_ignore_untemplated=True,

@@ -150,7 +153,7 @@ def instrument_fastapi(
        should_instrument_requests_inprogress=True,
        excluded_handlers=excluded_handlers or ["/health", "/readiness"],
        env_var_name="ENABLE_METRICS",
        inprogress_name="autogpt_http_requests_inprogress",
        inprogress_name=f"autogpt_{service_subsystem}_http_requests_inprogress",
        inprogress_labels=True,
    )
@@ -273,6 +273,8 @@ async def list_providers(
    except Exception as e:
        logger.warning(f"Failed to load blocks: {e}")

    from backend.sdk.registry import AutoRegistry

    providers = []
    for name in get_all_provider_names():
        supports_oauth = name in HANDLERS_BY_NAME

@@ -281,13 +283,27 @@ async def list_providers(
            getattr(handler_class, "DEFAULT_SCOPES", []) if handler_class else []
        )

        # Check if provider has specific auth types from SDK registration
        sdk_provider = AutoRegistry.get_provider(name)
        if sdk_provider and sdk_provider.supported_auth_types:
            supports_api_key = "api_key" in sdk_provider.supported_auth_types
            supports_user_password = (
                "user_password" in sdk_provider.supported_auth_types
            )
            supports_host_scoped = "host_scoped" in sdk_provider.supported_auth_types
        else:
            # Fallback for legacy providers
            supports_api_key = True  # All providers can accept API keys
            supports_user_password = name in ("smtp",)
            supports_host_scoped = name == "http"

        providers.append(
            ProviderInfo(
                name=name,
                supports_oauth=supports_oauth,
                supports_api_key=True,  # All providers can accept API keys
                supports_user_password=name in ("smtp",),  # SMTP uses user/password
                supports_host_scoped=name == "http",  # HTTP block uses host-scoped
                supports_api_key=supports_api_key,
                supports_user_password=supports_user_password,
                supports_host_scoped=supports_host_scoped,
                default_scopes=default_scopes,
            )
        )
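The new branch prefers SDK-declared auth types and only falls back to the old hard-coded heuristics when a provider declares none. A condensed sketch of that decision, with illustrative provider names and auth-type sets:

def resolve_auth_flags(name: str, supported: set[str] | None) -> tuple[bool, bool, bool]:
    # Returns (supports_api_key, supports_user_password, supports_host_scoped).
    if supported:
        return (
            "api_key" in supported,
            "user_password" in supported,
            "host_scoped" in supported,
        )
    # Legacy fallback: every provider accepts API keys; SMTP uses
    # user/password; the HTTP block uses host-scoped credentials.
    return True, name == "smtp", name == "http"

assert resolve_auth_flags("smtp", None) == (True, True, False)
assert resolve_auth_flags("notion", {"api_key"}) == (True, False, False)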
@@ -33,7 +33,11 @@ from backend.data.model import (
    OAuth2Credentials,
    UserIntegrations,
)
from backend.data.onboarding import OnboardingStep, complete_onboarding_step
from backend.data.onboarding import (
    OnboardingStep,
    complete_onboarding_step,
    increment_runs,
)
from backend.data.user import get_user_integrations
from backend.executor.utils import add_graph_execution
from backend.integrations.ayrshare import AyrshareClient, SocialPlatform

@@ -377,6 +381,7 @@ async def webhook_ingress_generic(
        return

    await complete_onboarding_step(user_id, OnboardingStep.TRIGGER_WEBHOOK)
    await increment_runs(user_id)

    # Execute all triggers concurrently for better performance
    tasks = []
@@ -1,7 +1,8 @@
import enum
from typing import Any, Optional
from typing import Any, Literal, Optional

import pydantic
from prisma.enums import OnboardingStep

from backend.data.api_key import APIKeyInfo, APIKeyPermission
from backend.data.graph import Graph

@@ -35,8 +36,13 @@ class WSSubscribeGraphExecutionsRequest(pydantic.BaseModel):
    graph_id: str


GraphCreationSource = Literal["builder", "upload"]
GraphExecutionSource = Literal["builder", "library", "onboarding"]


class CreateGraph(pydantic.BaseModel):
    graph: Graph
    source: GraphCreationSource | None = None


class CreateAPIKeyRequest(pydantic.BaseModel):

@@ -83,6 +89,8 @@ class NotificationPayload(pydantic.BaseModel):
    type: str
    event: str

    model_config = pydantic.ConfigDict(extra="allow")


class OnboardingNotificationPayload(NotificationPayload):
    step: str
    step: OnboardingStep | None
@@ -5,7 +5,7 @@ import time
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from typing import Annotated, Any, Sequence
from typing import Annotated, Any, Sequence, get_args

import pydantic
import stripe

@@ -45,12 +45,17 @@ from backend.data.credit import (
    set_auto_top_up,
)
from backend.data.graph import GraphSettings
from backend.data.model import CredentialsMetaInput
from backend.data.model import CredentialsMetaInput, UserOnboarding
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
from backend.data.onboarding import (
    FrontendOnboardingStep,
    OnboardingStep,
    UserOnboardingUpdate,
    complete_onboarding_step,
    complete_re_run_agent,
    get_recommended_agents,
    get_user_onboarding,
    increment_runs,
    onboarding_enabled,
    reset_user_onboarding,
    update_user_onboarding,

@@ -78,6 +83,7 @@ from backend.server.model import (
    CreateAPIKeyRequest,
    CreateAPIKeyResponse,
    CreateGraph,
    GraphExecutionSource,
    RequestTopUp,
    SetGraphActiveVersion,
    TimezoneResponse,

@@ -85,6 +91,7 @@ from backend.server.model import (
    UpdateTimezoneRequest,
    UploadFileResponse,
)
from backend.server.v2.store.model import StoreAgentDetails
from backend.util.cache import cached
from backend.util.clients import get_scheduler_client
from backend.util.cloud_storage import get_cloud_storage_handler

@@ -274,9 +281,10 @@ async def update_preferences(

@v1_router.get(
    "/onboarding",
    summary="Get onboarding status",
    summary="Onboarding state",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
    response_model=UserOnboarding,
)
async def get_onboarding(user_id: Annotated[str, Security(get_user_id)]):
    return await get_user_onboarding(user_id)

@@ -284,9 +292,10 @@ async def get_onboarding(user_id: Annotated[str, Security(get_user_id)]):

@v1_router.patch(
    "/onboarding",
    summary="Update onboarding progress",
    summary="Update onboarding state",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
    response_model=UserOnboarding,
)
async def update_onboarding(
    user_id: Annotated[str, Security(get_user_id)], data: UserOnboardingUpdate

@@ -294,25 +303,39 @@ async def update_onboarding(
    return await update_user_onboarding(user_id, data)


@v1_router.post(
    "/onboarding/step",
    summary="Complete onboarding step",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
)
async def onboarding_complete_step(
    user_id: Annotated[str, Security(get_user_id)], step: FrontendOnboardingStep
):
    if step not in get_args(FrontendOnboardingStep):
        raise HTTPException(status_code=400, detail="Invalid onboarding step")
    return await complete_onboarding_step(user_id, step)


@v1_router.get(
    "/onboarding/agents",
    summary="Get recommended agents",
    summary="Recommended onboarding agents",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
)
async def get_onboarding_agents(
    user_id: Annotated[str, Security(get_user_id)],
):
) -> list[StoreAgentDetails]:
    return await get_recommended_agents(user_id)


@v1_router.get(
    "/onboarding/enabled",
    summary="Check onboarding enabled",
    summary="Is onboarding enabled",
    tags=["onboarding", "public"],
    dependencies=[Security(requires_user)],
)
async def is_onboarding_enabled():
async def is_onboarding_enabled() -> bool:
    return await onboarding_enabled()


@@ -321,6 +344,7 @@ async def is_onboarding_enabled():
    summary="Reset onboarding progress",
    tags=["onboarding"],
    dependencies=[Security(requires_user)],
    response_model=UserOnboarding,
)
async def reset_onboarding(user_id: Annotated[str, Security(get_user_id)]):
    return await reset_user_onboarding(user_id)

@@ -809,7 +833,12 @@ async def create_new_graph(
    # as the graph already valid and no sub-graphs are returned back.
    await graph_db.create_graph(graph, user_id=user_id)
    await library_db.create_library_agent(graph, user_id=user_id)
    return await on_graph_activate(graph, user_id=user_id)
    activated_graph = await on_graph_activate(graph, user_id=user_id)

    if create_graph.source == "builder":
        await complete_onboarding_step(user_id, OnboardingStep.BUILDER_SAVE_AGENT)

    return activated_graph


@v1_router.delete(

@@ -967,6 +996,7 @@ async def execute_graph(
    credentials_inputs: Annotated[
        dict[str, CredentialsMetaInput], Body(..., embed=True, default_factory=dict)
    ],
    source: Annotated[GraphExecutionSource | None, Body(embed=True)] = None,
    graph_version: Optional[int] = None,
    preset_id: Optional[str] = None,
) -> execution_db.GraphExecutionMeta:

@@ -990,6 +1020,14 @@
        # Record successful graph execution
        record_graph_execution(graph_id=graph_id, status="success", user_id=user_id)
        record_graph_operation(operation="execute", status="success")
        await increment_runs(user_id)
        await complete_re_run_agent(user_id, graph_id)
        if source == "library":
            await complete_onboarding_step(
                user_id, OnboardingStep.MARKETPLACE_RUN_AGENT
            )
        elif source == "builder":
            await complete_onboarding_step(user_id, OnboardingStep.BUILDER_RUN_AGENT)
        return result
    except GraphValidationError as e:
        # Record failed graph execution

@@ -1103,6 +1141,15 @@ async def list_graph_executions(
    filtered_executions = await hide_activity_summaries_if_disabled(
        paginated_result.executions, user_id
    )
    onboarding = await get_user_onboarding(user_id)
    if (
        onboarding.onboardingAgentExecutionId
        and onboarding.onboardingAgentExecutionId
        in [exec.id for exec in filtered_executions]
        and OnboardingStep.GET_RESULTS not in onboarding.completedSteps
    ):
        await complete_onboarding_step(user_id, OnboardingStep.GET_RESULTS)

    return execution_db.GraphExecutionsPaginated(
        executions=filtered_executions, pagination=paginated_result.pagination
    )

@@ -1140,6 +1187,12 @@ async def get_graph_execution(

    # Apply feature flags to filter out disabled features
    result = await hide_activity_summary_if_disabled(result, user_id)
    onboarding = await get_user_onboarding(user_id)
    if (
        onboarding.onboardingAgentExecutionId == graph_exec_id
        and OnboardingStep.GET_RESULTS not in onboarding.completedSteps
    ):
        await complete_onboarding_step(user_id, OnboardingStep.GET_RESULTS)

    return result

@@ -1316,6 +1369,8 @@ async def create_graph_execution_schedule(
        result.next_run_time, user_timezone
    )

    await complete_onboarding_step(user_id, OnboardingStep.SCHEDULE_AGENT)

    return result
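A hypothetical client call against the execute route; the URL and auth header are illustrative only (the exact path is not shown in this diff). The new "source" body field is what tells the backend whether a successful run should complete the builder or the library onboarding step:

import httpx

resp = httpx.post(
    "http://localhost:8006/api/v1/graphs/GRAPH_ID/execute",  # illustrative URL
    json={"inputs": {}, "credentials_inputs": {}, "source": "library"},
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()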
@@ -8,6 +8,10 @@ from fastapi import APIRouter, HTTPException, Security
from pydantic import BaseModel, Field

from backend.blocks.llm import LlmModel
from backend.data.analytics import (
    AccuracyTrendsResponse,
    get_accuracy_trends_and_alerts,
)
from backend.data.execution import (
    ExecutionStatus,
    GraphExecutionMeta,

@@ -83,6 +87,18 @@ class ExecutionAnalyticsConfig(BaseModel):
    recommended_model: str


class AccuracyTrendsRequest(BaseModel):
    graph_id: str = Field(..., description="Graph ID to analyze", min_length=1)
    user_id: Optional[str] = Field(None, description="Optional user ID filter")
    days_back: int = Field(30, description="Number of days to look back", ge=7, le=90)
    drop_threshold: float = Field(
        10.0, description="Alert threshold percentage", ge=1.0, le=50.0
    )
    include_historical: bool = Field(
        False, description="Include historical data for charts"
    )


router = APIRouter(
    prefix="/admin",
    tags=["admin", "execution_analytics"],

@@ -419,3 +435,40 @@ async def _process_batch(
    return await asyncio.gather(
        *[process_single_execution(execution) for execution in executions]
    )


@router.get(
    "/execution_accuracy_trends",
    response_model=AccuracyTrendsResponse,
    summary="Get Execution Accuracy Trends and Alerts",
)
async def get_execution_accuracy_trends(
    graph_id: str,
    user_id: Optional[str] = None,
    days_back: int = 30,
    drop_threshold: float = 10.0,
    include_historical: bool = False,
    admin_user_id: str = Security(get_user_id),
) -> AccuracyTrendsResponse:
    """
    Get execution accuracy trends with moving averages and alert detection.
    Simple single-query approach.
    """
    logger.info(
        f"Admin user {admin_user_id} requesting accuracy trends for graph {graph_id}"
    )

    try:
        result = await get_accuracy_trends_and_alerts(
            graph_id=graph_id,
            days_back=days_back,
            user_id=user_id,
            drop_threshold=drop_threshold,
            include_historical=include_historical,
        )

        return result

    except Exception as e:
        logger.exception(f"Error getting accuracy trends for graph {graph_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
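A hypothetical admin query against the new endpoint; the base URL and token are illustrative. Query parameters map one-to-one onto the handler's arguments above:

import httpx

resp = httpx.get(
    "http://localhost:8006/admin/execution_accuracy_trends",  # illustrative URL
    params={"graph_id": "GRAPH_ID", "days_back": 14, "include_historical": True},
    headers={"Authorization": "Bearer <admin token>"},
)
trends = resp.json()  # shaped like AccuracyTrendsResponse: latest_data, alert, historical_data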
@@ -1,9 +1,16 @@
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from difflib import SequenceMatcher
|
||||
from typing import Sequence
|
||||
|
||||
import prisma
|
||||
|
||||
import backend.data.block
|
||||
import backend.server.v2.library.db as library_db
|
||||
import backend.server.v2.library.model as library_model
|
||||
import backend.server.v2.store.db as store_db
|
||||
import backend.server.v2.store.model as store_model
|
||||
from backend.blocks import load_all_blocks
|
||||
from backend.blocks.llm import LlmModel
|
||||
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
|
||||
@@ -14,17 +21,36 @@ from backend.server.v2.builder.model import (
|
||||
BlockResponse,
|
||||
BlockType,
|
||||
CountResponse,
|
||||
FilterType,
|
||||
Provider,
|
||||
ProviderResponse,
|
||||
SearchBlocksResponse,
|
||||
SearchEntry,
|
||||
)
|
||||
from backend.util.cache import cached
|
||||
from backend.util.models import Pagination
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
|
||||
_static_counts_cache: dict | None = None
|
||||
_suggested_blocks: list[BlockInfo] | None = None
|
||||
|
||||
MAX_LIBRARY_AGENT_RESULTS = 100
|
||||
MAX_MARKETPLACE_AGENT_RESULTS = 100
|
||||
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0
|
||||
|
||||
SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent
|
||||
|
||||
|
||||
@dataclass
|
||||
class _ScoredItem:
|
||||
item: SearchResultItem
|
||||
filter_type: FilterType
|
||||
score: float
|
||||
sort_key: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class _SearchCacheEntry:
|
||||
items: list[SearchResultItem]
|
||||
total_items: dict[FilterType, int]
|
||||
|
||||
|
||||
def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]:
|
||||
@@ -130,71 +156,244 @@ def get_block_by_id(block_id: str) -> BlockInfo | None:
|
||||
return None
|
||||
|
||||
|
||||
def search_blocks(
|
||||
include_blocks: bool = True,
|
||||
include_integrations: bool = True,
|
||||
query: str = "",
|
||||
page: int = 1,
|
||||
page_size: int = 50,
|
||||
) -> SearchBlocksResponse:
|
||||
async def update_search(user_id: str, search: SearchEntry) -> str:
|
||||
"""
|
||||
Get blocks based on the filter and query.
|
||||
`providers` only applies for `integrations` filter.
|
||||
Upsert a search request for the user and return the search ID.
|
||||
"""
|
||||
blocks: list[AnyBlockSchema] = []
|
||||
query = query.lower()
|
||||
if search.search_id:
|
||||
# Update existing search
|
||||
await prisma.models.BuilderSearchHistory.prisma().update(
|
||||
where={
|
||||
"id": search.search_id,
|
||||
},
|
||||
data={
|
||||
"searchQuery": search.search_query or "",
|
||||
"filter": search.filter or [], # type: ignore
|
||||
"byCreator": search.by_creator or [],
|
||||
},
|
||||
)
|
||||
return search.search_id
|
||||
else:
|
||||
# Create new search
|
||||
new_search = await prisma.models.BuilderSearchHistory.prisma().create(
|
||||
data={
|
||||
"userId": user_id,
|
||||
"searchQuery": search.search_query or "",
|
||||
"filter": search.filter or [], # type: ignore
|
||||
"byCreator": search.by_creator or [],
|
||||
}
|
||||
)
|
||||
return new_search.id
|
||||
|
||||
total = 0
|
||||
skip = (page - 1) * page_size
|
||||
take = page_size
|
||||
|
||||
async def get_recent_searches(user_id: str, limit: int = 5) -> list[SearchEntry]:
|
||||
"""
|
||||
Get the user's most recent search requests.
|
||||
"""
|
||||
searches = await prisma.models.BuilderSearchHistory.prisma().find_many(
|
||||
where={
|
||||
"userId": user_id,
|
||||
},
|
||||
order={
|
||||
"updatedAt": "desc",
|
||||
},
|
||||
take=limit,
|
||||
)
|
||||
return [
|
||||
SearchEntry(
|
||||
search_query=s.searchQuery,
|
||||
filter=s.filter, # type: ignore
|
||||
by_creator=s.byCreator,
|
||||
search_id=s.id,
|
||||
)
|
||||
for s in searches
|
||||
]
|
||||
|
||||
|
||||
async def get_sorted_search_results(
|
||||
*,
|
||||
user_id: str,
|
||||
search_query: str | None,
|
||||
filters: Sequence[FilterType],
|
||||
by_creator: Sequence[str] | None = None,
|
||||
) -> _SearchCacheEntry:
|
||||
normalized_filters: tuple[FilterType, ...] = tuple(sorted(set(filters or [])))
|
||||
normalized_creators: tuple[str, ...] = tuple(sorted(set(by_creator or [])))
|
||||
return await _build_cached_search_results(
|
||||
user_id=user_id,
|
||||
search_query=search_query or "",
|
||||
filters=normalized_filters,
|
||||
by_creator=normalized_creators,
|
||||
)
|
||||
|
||||
|
||||
@cached(ttl_seconds=300, shared_cache=True)
async def _build_cached_search_results(
    user_id: str,
    search_query: str,
    filters: tuple[FilterType, ...],
    by_creator: tuple[str, ...],
) -> _SearchCacheEntry:
    normalized_query = (search_query or "").strip().lower()

    include_blocks = "blocks" in filters
    include_integrations = "integrations" in filters
    include_library_agents = "my_agents" in filters
    include_marketplace_agents = "marketplace_agents" in filters

    scored_items: list[_ScoredItem] = []
    total_items: dict[FilterType, int] = {
        "blocks": 0,
        "integrations": 0,
        "marketplace_agents": 0,
        "my_agents": 0,
    }

    block_results, block_total, integration_total = _collect_block_results(
        normalized_query=normalized_query,
        include_blocks=include_blocks,
        include_integrations=include_integrations,
    )
    scored_items.extend(block_results)
    total_items["blocks"] = block_total
    total_items["integrations"] = integration_total

    if include_library_agents:
        library_response = await library_db.list_library_agents(
            user_id=user_id,
            search_term=search_query or None,
            page=1,
            page_size=MAX_LIBRARY_AGENT_RESULTS,
        )
        total_items["my_agents"] = library_response.pagination.total_items
        scored_items.extend(
            _build_library_items(
                agents=library_response.agents,
                normalized_query=normalized_query,
            )
        )

    if include_marketplace_agents:
        marketplace_response = await store_db.get_store_agents(
            creators=list(by_creator) or None,
            search_query=search_query or None,
            page=1,
            page_size=MAX_MARKETPLACE_AGENT_RESULTS,
        )
        total_items["marketplace_agents"] = marketplace_response.pagination.total_items
        scored_items.extend(
            _build_marketplace_items(
                agents=marketplace_response.agents,
                normalized_query=normalized_query,
            )
        )

    sorted_items = sorted(
        scored_items,
        key=lambda entry: (-entry.score, entry.sort_key, entry.filter_type),
    )

    return _SearchCacheEntry(
        items=[entry.item for entry in sorted_items],
        total_items=total_items,
    )

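Aside (illustrative): the tuple sort key above orders items by descending score, then by name, then by filter type, so ties break deterministically. A minimal self-contained demonstration:

from dataclasses import dataclass

@dataclass
class Scored:
    score: float
    sort_key: str
    filter_type: str

items = [
    Scored(60, "b", "blocks"),
    Scored(90, "a", "blocks"),
    Scored(60, "a", "integrations"),
]
items.sort(key=lambda e: (-e.score, e.sort_key, e.filter_type))
assert [(e.score, e.sort_key) for e in items] == [(90, "a"), (60, "a"), (60, "b")]
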
def _collect_block_results(
    *,
    normalized_query: str,
    include_blocks: bool,
    include_integrations: bool,
) -> tuple[list[_ScoredItem], int, int]:
    results: list[_ScoredItem] = []
    block_count = 0
    integration_count = 0

    if not include_blocks and not include_integrations:
        return results, block_count, integration_count

    for block_type in load_all_blocks().values():
        block: AnyBlockSchema = block_type()
        # Skip disabled blocks
        if block.disabled:
            continue
        # Skip blocks that don't match the query
        if (
            query not in block.name.lower()
            and query not in block.description.lower()
            and not _matches_llm_model(block.input_schema, query)
        ):
            continue
        keep = False

        block_info = block.get_info()
        credentials = list(block.input_schema.get_credentials_fields().values())
        if include_integrations and len(credentials) > 0:
            keep = True
        is_integration = len(credentials) > 0

        if is_integration and not include_integrations:
            continue
        if not is_integration and not include_blocks:
            continue

        score = _score_block(block, block_info, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        filter_type: FilterType = "integrations" if is_integration else "blocks"
        if is_integration:
            integration_count += 1
        if include_blocks and len(credentials) == 0:
            keep = True
        else:
            block_count += 1

        if not keep:
        results.append(
            _ScoredItem(
                item=block_info,
                filter_type=filter_type,
                score=score,
                sort_key=_get_item_name(block_info),
            )
        )

    return results, block_count, integration_count


def _build_library_items(
    *,
    agents: list[library_model.LibraryAgent],
    normalized_query: str,
) -> list[_ScoredItem]:
    results: list[_ScoredItem] = []

    for agent in agents:
        score = _score_library_agent(agent, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        total += 1
        if skip > 0:
            skip -= 1
            continue
        if take > 0:
            take -= 1
            blocks.append(block)
        results.append(
            _ScoredItem(
                item=agent,
                filter_type="my_agents",
                score=score,
                sort_key=_get_item_name(agent),
            )
        )

    return SearchBlocksResponse(
        blocks=BlockResponse(
            blocks=[b.get_info() for b in blocks],
            pagination=Pagination(
                total_items=total,
                total_pages=(total + page_size - 1) // page_size,
                current_page=page,
                page_size=page_size,
            ),
        ),
        total_block_count=block_count,
        total_integration_count=integration_count,
    )
    return results


def _build_marketplace_items(
    *,
    agents: list[store_model.StoreAgent],
    normalized_query: str,
) -> list[_ScoredItem]:
    results: list[_ScoredItem] = []

    for agent in agents:
        score = _score_store_agent(agent, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        results.append(
            _ScoredItem(
                item=agent,
                filter_type="marketplace_agents",
                score=score,
                sort_key=_get_item_name(agent),
            )
        )

    return results


def get_providers(
@@ -251,16 +450,12 @@ async def get_counts(user_id: str) -> CountResponse:
    )


@cached(ttl_seconds=3600)
async def _get_static_counts():
    """
    Get counts of blocks, integrations, and marketplace agents.
    This is cached to avoid unnecessary database queries and calculations.
    Can't use functools.cache here because the function is async.
    """
    global _static_counts_cache
    if _static_counts_cache is not None:
        return _static_counts_cache

    all_blocks = 0
    input_blocks = 0
    action_blocks = 0
@@ -287,7 +482,7 @@ async def _get_static_counts():

    marketplace_agents = await prisma.models.StoreAgent.prisma().count()

    _static_counts_cache = {
    return {
        "all_blocks": all_blocks,
        "input_blocks": input_blocks,
        "action_blocks": action_blocks,
@@ -296,8 +491,6 @@ async def _get_static_counts():
        "marketplace_agents": marketplace_agents,
    }

    return _static_counts_cache


def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    for field in schema_cls.model_fields.values():
@@ -308,6 +501,123 @@ def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    return False


def _score_block(
    block: AnyBlockSchema,
    block_info: BlockInfo,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = block_info.name.lower()
    description = block_info.description.lower()
    score = _score_primary_fields(name, description, normalized_query)

    category_text = " ".join(
        category.get("category", "").lower() for category in block_info.categories
    )
    score += _score_additional_field(category_text, normalized_query, 12, 6)

    credentials_info = block.input_schema.get_credentials_fields_info().values()
    provider_names = [
        provider.value.lower()
        for info in credentials_info
        for provider in info.provider
    ]
    provider_text = " ".join(provider_names)
    score += _score_additional_field(provider_text, normalized_query, 15, 6)

    if _matches_llm_model(block.input_schema, normalized_query):
        score += 20

    return score


def _score_library_agent(
    agent: library_model.LibraryAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.name.lower()
    description = (agent.description or "").lower()
    instructions = (agent.instructions or "").lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(instructions, normalized_query, 15, 6)
    score += _score_additional_field(
        agent.creator_name.lower(), normalized_query, 10, 5
    )

    return score


def _score_store_agent(
    agent: store_model.StoreAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.agent_name.lower()
    description = agent.description.lower()
    sub_heading = agent.sub_heading.lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(sub_heading, normalized_query, 12, 6)
    score += _score_additional_field(agent.creator.lower(), normalized_query, 10, 5)

    return score


def _score_primary_fields(name: str, description: str, query: str) -> float:
    score = 0.0
    if name == query:
        score += 120
    elif name.startswith(query):
        score += 90
    elif query in name:
        score += 60

    score += SequenceMatcher(None, name, query).ratio() * 50
    if description:
        if query in description:
            score += 30
        score += SequenceMatcher(None, description, query).ratio() * 25
    return score

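Aside (illustrative): difflib.SequenceMatcher returns a similarity ratio in [0, 1]; the weights above (50 for names, 25 for descriptions) turn that into score points, so near-miss matches still outrank unrelated items:

from difflib import SequenceMatcher

ratio = SequenceMatcher(None, "flux image generator", "image gen").ratio()
# ratio is in [0, 1]; scaled by 50 for names and 25 for descriptions.
print(round(ratio * 50, 1))
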
def _score_additional_field(
    value: str,
    query: str,
    contains_weight: float,
    similarity_weight: float,
) -> float:
    if not value or not query:
        return 0.0

    score = 0.0
    if query in value:
        score += contains_weight
    score += SequenceMatcher(None, value, query).ratio() * similarity_weight
    return score


def _should_include_item(score: float, normalized_query: str) -> bool:
    if not normalized_query:
        return True
    return score >= MIN_SCORE_FOR_FILTERED_RESULTS


def _get_item_name(item: SearchResultItem) -> str:
    if isinstance(item, BlockInfo):
        return item.name.lower()
    if isinstance(item, library_model.LibraryAgent):
        return item.name.lower()
    return item.agent_name.lower()


@cached(ttl_seconds=3600)
def _get_all_providers() -> dict[ProviderName, Provider]:
    providers: dict[ProviderName, Provider] = {}
@@ -329,13 +639,9 @@ def _get_all_providers() -> dict[ProviderName, Provider]:
    return providers


@cached(ttl_seconds=3600)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    global _suggested_blocks

    if _suggested_blocks is not None and len(_suggested_blocks) >= count:
        return _suggested_blocks[:count]

    _suggested_blocks = []
    suggested_blocks = []
    # Sum the number of executions for each block type
    # Prisma cannot group by nested relations, so we do a raw query
    # Calculate the cutoff timestamp
@@ -376,7 +682,7 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    # Sort blocks by execution count
    blocks.sort(key=lambda x: x[1], reverse=True)

    _suggested_blocks = [block[0] for block in blocks]
    suggested_blocks = [block[0] for block in blocks]

    # Return the top blocks
    return _suggested_blocks[:count]
    return suggested_blocks[:count]

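Aside (rough mental model only; the real @cached decorator in this codebase is shared-cache aware and more featureful): a TTL cache for async functions along these lines replaces the hand-rolled module-level globals removed above.

import functools
import time

def cached(ttl_seconds: int):
    """Hypothetical sketch: memoize an async function for ttl_seconds."""
    def wrap(fn):
        store: dict = {}

        @functools.wraps(fn)
        async def inner(*args):
            now = time.monotonic()
            hit = store.get(args)
            if hit and now - hit[0] < ttl_seconds:
                return hit[1]  # still fresh: reuse the cached value
            value = await fn(*args)
            store[args] = (now, value)
            return value

        return inner
    return wrap
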
@@ -18,10 +18,17 @@ FilterType = Literal[
BlockType = Literal["all", "input", "action", "output"]


class SearchEntry(BaseModel):
    search_query: str | None = None
    filter: list[FilterType] | None = None
    by_creator: list[str] | None = None
    search_id: str | None = None


# Suggestions
class SuggestionsResponse(BaseModel):
    otto_suggestions: list[str]
    recent_searches: list[str]
    recent_searches: list[SearchEntry]
    providers: list[ProviderName]
    top_blocks: list[BlockInfo]

@@ -32,7 +39,7 @@ class BlockCategoryResponse(BaseModel):
    total_blocks: int
    blocks: list[BlockInfo]

    model_config = {"use_enum_values": False}  # <== use enum names like "AI"
    model_config = {"use_enum_values": False}  # Use enum names like "AI"


# Input/Action/Output and see all for block categories
@@ -53,17 +60,11 @@ class ProviderResponse(BaseModel):
    pagination: Pagination


class SearchBlocksResponse(BaseModel):
    blocks: BlockResponse
    total_block_count: int
    total_integration_count: int


class SearchResponse(BaseModel):
    items: list[BlockInfo | library_model.LibraryAgent | store_model.StoreAgent]
    search_id: str
    total_items: dict[FilterType, int]
    page: int
    more_pages: bool
    pagination: Pagination


class CountResponse(BaseModel):

@@ -6,10 +6,6 @@ from autogpt_libs.auth.dependencies import get_user_id, requires_user

import backend.server.v2.builder.db as builder_db
import backend.server.v2.builder.model as builder_model
import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.db as store_db
import backend.server.v2.store.model as store_model
from backend.integrations.providers import ProviderName
from backend.util.models import Pagination

@@ -45,7 +41,9 @@ def sanitize_query(query: str | None) -> str | None:
    summary="Get Builder suggestions",
    response_model=builder_model.SuggestionsResponse,
)
async def get_suggestions() -> builder_model.SuggestionsResponse:
async def get_suggestions(
    user_id: Annotated[str, fastapi.Security(get_user_id)],
) -> builder_model.SuggestionsResponse:
    """
    Get all suggestions for the Blocks Menu.
    """
@@ -55,11 +53,7 @@ async def get_suggestions() -> builder_model.SuggestionsResponse:
            "Help me create a list",
            "Help me feed my data to Google Maps",
        ],
        recent_searches=[
            "image generation",
            "deepfake",
            "competitor analysis",
        ],
        recent_searches=await builder_db.get_recent_searches(user_id),
        providers=[
            ProviderName.TWITTER,
            ProviderName.GITHUB,
@@ -147,7 +141,6 @@ async def get_providers(
    )


# Not using post method because on frontend, orval doesn't support Infinite Query with POST method.
@router.get(
    "/search",
    summary="Builder search",
@@ -157,7 +150,7 @@ async def get_providers(
async def search(
    user_id: Annotated[str, fastapi.Security(get_user_id)],
    search_query: Annotated[str | None, fastapi.Query()] = None,
    filter: Annotated[list[str] | None, fastapi.Query()] = None,
    filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
    search_id: Annotated[str | None, fastapi.Query()] = None,
    by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
    page: Annotated[int, fastapi.Query()] = 1,
@@ -176,69 +169,43 @@ async def search(
    ]
    search_query = sanitize_query(search_query)

    # Blocks&Integrations
    blocks = builder_model.SearchBlocksResponse(
        blocks=builder_model.BlockResponse(
            blocks=[],
            pagination=Pagination.empty(),
        ),
        total_block_count=0,
        total_integration_count=0,
    # Get all possible results
    cached_results = await builder_db.get_sorted_search_results(
        user_id=user_id,
        search_query=search_query,
        filters=filter,
        by_creator=by_creator,
    )
    if "blocks" in filter or "integrations" in filter:
        blocks = builder_db.search_blocks(
            include_blocks="blocks" in filter,
            include_integrations="integrations" in filter,
            query=search_query or "",
            page=page,
            page_size=page_size,
        )

    # Library Agents
    my_agents = library_model.LibraryAgentResponse(
        agents=[],
        pagination=Pagination.empty(),
    # Paginate results
    total_combined_items = len(cached_results.items)
    pagination = Pagination(
        total_items=total_combined_items,
        total_pages=(total_combined_items + page_size - 1) // page_size,
        current_page=page,
        page_size=page_size,
    )
    if "my_agents" in filter:
        my_agents = await library_db.list_library_agents(
            user_id=user_id,
            search_term=search_query,
            page=page,
            page_size=page_size,
        )

    # Marketplace Agents
    marketplace_agents = store_model.StoreAgentsResponse(
        agents=[],
        pagination=Pagination.empty(),
    )
    if "marketplace_agents" in filter:
        marketplace_agents = await store_db.get_store_agents(
            creators=by_creator,
    start_idx = (page - 1) * page_size
    end_idx = start_idx + page_size
    paginated_items = cached_results.items[start_idx:end_idx]

    # Update the search entry by id
    search_id = await builder_db.update_search(
        user_id,
        builder_model.SearchEntry(
            search_query=search_query,
            page=page,
            page_size=page_size,
        )

    more_pages = False
    if (
        blocks.blocks.pagination.current_page < blocks.blocks.pagination.total_pages
        or my_agents.pagination.current_page < my_agents.pagination.total_pages
        or marketplace_agents.pagination.current_page
        < marketplace_agents.pagination.total_pages
    ):
        more_pages = True
            filter=filter,
            by_creator=by_creator,
            search_id=search_id,
        ),
    )

    return builder_model.SearchResponse(
        items=blocks.blocks.blocks + my_agents.agents + marketplace_agents.agents,
        total_items={
            "blocks": blocks.total_block_count,
            "integrations": blocks.total_integration_count,
            "marketplace_agents": marketplace_agents.pagination.total_items,
            "my_agents": my_agents.pagination.total_items,
        },
        page=page,
        more_pages=more_pages,
        items=paginated_items,
        search_id=search_id,
        total_items=cached_results.total_items,
        pagination=pagination,
    )

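Aside (illustrative): the slice-based pagination used in search() above, including the ceiling-division page count:

items = list(range(23))          # pretend these are cached, pre-sorted results
page, page_size = 2, 10

total_pages = (len(items) + page_size - 1) // page_size   # ceiling division -> 3
start_idx = (page - 1) * page_size
paginated = items[start_idx : start_idx + page_size]
assert total_pages == 3 and paginated[0] == 10 and len(paginated) == 10
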
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)


router = APIRouter(
    tags=["executions", "review", "private"],
    tags=["v2", "executions", "review"],
    dependencies=[Security(autogpt_auth_lib.requires_user)],
)

@@ -134,18 +134,14 @@ async def process_review_action(
    # Build review decisions map
    review_decisions = {}
    for review in request.reviews:
        if review.approved:
            review_decisions[review.node_exec_id] = (
                ReviewStatus.APPROVED,
                review.reviewed_data,
                review.message,
            )
        else:
            review_decisions[review.node_exec_id] = (
                ReviewStatus.REJECTED,
                None,
                review.message,
            )
        review_status = (
            ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
        )
        review_decisions[review.node_exec_id] = (
            review_status,
            review.reviewed_data,
            review.message,
        )

    # Process all reviews
    updated_reviews = await process_all_reviews_for_execution(

@@ -1,13 +1,15 @@
import logging
from typing import Optional
from typing import Literal, Optional

import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, Body, HTTPException, Query, Security, status
from fastapi.responses import Response
from prisma.enums import OnboardingStep

import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.exceptions as store_exceptions
from backend.data.onboarding import complete_onboarding_step
from backend.util.exceptions import DatabaseError, NotFoundError

logger = logging.getLogger(__name__)
@@ -200,6 +202,9 @@ async def get_library_agent_by_store_listing_version_id(
)
async def add_marketplace_agent_to_library(
    store_listing_version_id: str = Body(embed=True),
    source: Literal["onboarding", "marketplace"] = Body(
        default="marketplace", embed=True
    ),
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryAgent:
    """
@@ -217,10 +222,15 @@ async def add_marketplace_agent_to_library(
        HTTPException(500): If a server/database error occurs.
    """
    try:
        return await library_db.add_store_agent_to_library(
        agent = await library_db.add_store_agent_to_library(
            store_listing_version_id=store_listing_version_id,
            user_id=user_id,
        )
        if source != "onboarding":
            await complete_onboarding_step(
                user_id, OnboardingStep.MARKETPLACE_ADD_AGENT
            )
        return agent

    except store_exceptions.AgentNotFoundError as e:
        logger.warning(

@@ -10,6 +10,7 @@ from backend.data.execution import GraphExecutionMeta
from backend.data.graph import get_graph
from backend.data.integrations import get_webhook
from backend.data.model import CredentialsMetaInput
from backend.data.onboarding import increment_runs
from backend.executor.utils import add_graph_execution, make_node_credentials_input_map
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.webhooks import get_webhook_manager
@@ -401,6 +402,8 @@ async def execute_preset(
    merged_node_input = preset.inputs | inputs
    merged_credential_inputs = preset.credentials | credential_inputs

    await increment_runs(user_id)

    return await add_graph_execution(
        user_id=user_id,
        graph_id=preset.graph_id,

@@ -1,5 +1,6 @@
import datetime
import json
from unittest.mock import AsyncMock

import fastapi.testclient
import pytest
@@ -225,6 +226,10 @@ def test_add_agent_to_library_success(
        "backend.server.v2.library.db.add_store_agent_to_library"
    )
    mock_db_call.return_value = mock_library_agent
    mock_complete_onboarding = mocker.patch(
        "backend.server.v2.library.routes.agents.complete_onboarding_step",
        new_callable=AsyncMock,
    )

    response = client.post(
        "/agents", json={"store_listing_version_id": "test-version-id"}
@@ -239,6 +244,7 @@ def test_add_agent_to_library_success(
    mock_db_call.assert_called_once_with(
        store_listing_version_id="test-version-id", user_id=test_user_id
    )
    mock_complete_onboarding.assert_awaited_once()


def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture, test_user_id: str):

@@ -327,6 +327,7 @@ async def get_store_agent_details(
        slug=agent.slug,
        agent_name=agent.agent_name,
        agent_video=agent.agent_video or "",
        agent_output_demo=agent.agent_output_demo or "",
        agent_image=agent.agent_image,
        creator=agent.creator_username or "",
        creator_avatar=agent.creator_avatar or "",
@@ -397,6 +398,7 @@ async def get_store_agent_by_version_id(
        slug=agent.slug,
        agent_name=agent.agent_name,
        agent_video=agent.agent_video or "",
        agent_output_demo=agent.agent_output_demo or "",
        agent_image=agent.agent_image,
        creator=agent.creator_username or "",
        creator_avatar=agent.creator_avatar or "",
@@ -683,6 +685,7 @@ async def create_store_submission(
    slug: str,
    name: str,
    video_url: str | None = None,
    agent_output_demo_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    instructions: str | None = None,
@@ -777,6 +780,7 @@ async def create_store_submission(
            agentGraphVersion=agent_version,
            name=name,
            videoUrl=video_url,
            agentOutputDemoUrl=agent_output_demo_url,
            imageUrls=image_urls,
            description=description,
            instructions=instructions,
@@ -849,6 +853,7 @@ async def edit_store_submission(
    store_listing_version_id: str,
    name: str,
    video_url: str | None = None,
    agent_output_demo_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    sub_heading: str = "",
@@ -930,6 +935,7 @@ async def edit_store_submission(
            store_listing_id=current_version.storeListingId,
            name=name,
            video_url=video_url,
            agent_output_demo_url=agent_output_demo_url,
            image_urls=image_urls,
            description=description,
            sub_heading=sub_heading,
@@ -947,6 +953,7 @@ async def edit_store_submission(
        data=prisma.types.StoreListingVersionUpdateInput(
            name=name,
            videoUrl=video_url,
            agentOutputDemoUrl=agent_output_demo_url,
            imageUrls=image_urls,
            description=description,
            categories=categories,
@@ -1008,6 +1015,7 @@ async def create_store_version(
    store_listing_id: str,
    name: str,
    video_url: str | None = None,
    agent_output_demo_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    instructions: str | None = None,
@@ -1077,6 +1085,7 @@ async def create_store_version(
            agentGraphVersion=agent_version,
            name=name,
            videoUrl=video_url,
            agentOutputDemoUrl=agent_output_demo_url,
            imageUrls=image_urls,
            description=description,
            instructions=instructions,

@@ -44,6 +44,7 @@ class StoreAgentDetails(pydantic.BaseModel):
    slug: str
    agent_name: str
    agent_video: str
    agent_output_demo: str
    agent_image: list[str]
    creator: str
    creator_avatar: str
@@ -121,6 +122,7 @@ class StoreSubmission(pydantic.BaseModel):

    # Additional fields for editing
    video_url: str | None = None
    agent_output_demo_url: str | None = None
    categories: list[str] = []


@@ -157,6 +159,7 @@ class StoreSubmissionRequest(pydantic.BaseModel):
    name: str
    sub_heading: str
    video_url: str | None = None
    agent_output_demo_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    instructions: str | None = None
@@ -169,6 +172,7 @@ class StoreSubmissionEditRequest(pydantic.BaseModel):
    name: str
    sub_heading: str
    video_url: str | None = None
    agent_output_demo_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    instructions: str | None = None

@@ -62,6 +62,7 @@ def test_store_agent_details():
        slug="test-agent",
        agent_name="Test Agent",
        agent_video="video.mp4",
        agent_output_demo="demo.mp4",
        agent_image=["image1.jpg", "image2.jpg"],
        creator="creator1",
        creator_avatar="avatar.jpg",

@@ -438,6 +438,7 @@ async def create_submission(
        slug=submission_request.slug,
        name=submission_request.name,
        video_url=submission_request.video_url,
        agent_output_demo_url=submission_request.agent_output_demo_url,
        image_urls=submission_request.image_urls,
        description=submission_request.description,
        instructions=submission_request.instructions,
@@ -481,6 +482,7 @@ async def edit_submission(
        store_listing_version_id=store_listing_version_id,
        name=submission_request.name,
        video_url=submission_request.video_url,
        agent_output_demo_url=submission_request.agent_output_demo_url,
        image_urls=submission_request.image_urls,
        description=submission_request.description,
        instructions=submission_request.instructions,

@@ -378,6 +378,7 @@ def test_get_agent_details(
        slug="test-agent",
        agent_name="Test Agent",
        agent_video="video.mp4",
        agent_output_demo="demo.mp4",
        agent_image=["image1.jpg", "image2.jpg"],
        creator="creator1",
        creator_avatar="avatar1.jpg",

@@ -5,6 +5,13 @@ from tiktoken import encoding_for_model

from backend.util import json

# ---------------------------------------------------------------------------#
#                                 CONSTANTS                                   #
# ---------------------------------------------------------------------------#

# Message prefixes for important system messages that should be protected during compression
MAIN_OBJECTIVE_PREFIX = "[Main Objective Prompt]: "

# ---------------------------------------------------------------------------#
#                            INTERNAL UTILITIES                               #
# ---------------------------------------------------------------------------#
@@ -63,6 +70,55 @@ def _msg_tokens(msg: dict, enc) -> int:
    return WRAPPER + content_tokens + tool_call_tokens


def _is_tool_message(msg: dict) -> bool:
    """Check if a message contains tool calls or results that should be protected."""
    content = msg.get("content")

    # Check for Anthropic-style tool messages
    if isinstance(content, list) and any(
        isinstance(item, dict) and item.get("type") in ("tool_use", "tool_result")
        for item in content
    ):
        return True

    # Check for OpenAI-style tool calls in the message
    if "tool_calls" in msg or msg.get("role") == "tool":
        return True

    return False


def _is_objective_message(msg: dict) -> bool:
    """Check if a message contains objective/system prompts that should be absolutely protected."""
    content = msg.get("content", "")
    if isinstance(content, str):
        # Protect any message with the main objective prefix
        return content.startswith(MAIN_OBJECTIVE_PREFIX)
    return False

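Aside (illustrative): the two message shapes _is_tool_message() recognizes, plus a plain message it leaves unprotected:

openai_style = {"role": "assistant", "tool_calls": [{"id": "call_1"}]}
anthropic_style = {
    "role": "user",
    "content": [{"type": "tool_result", "tool_use_id": "call_1", "content": "ok"}],
}
plain = {"role": "user", "content": "hello"}

assert _is_tool_message(openai_style)
assert _is_tool_message(anthropic_style)
assert not _is_tool_message(plain)
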
def _truncate_tool_message_content(msg: dict, enc, max_tokens: int) -> None:
    """
    Carefully truncate tool message content while preserving tool structure.
    Only truncates tool_result content, leaves tool_use intact.
    """
    content = msg.get("content")
    if not isinstance(content, list):
        return

    for item in content:
        # Only process tool_result items, leave tool_use blocks completely intact
        if not (isinstance(item, dict) and item.get("type") == "tool_result"):
            continue

        result_content = item.get("content", "")
        if (
            isinstance(result_content, str)
            and _tok_len(result_content, enc) > max_tokens
        ):
            item["content"] = _truncate_middle_tokens(result_content, enc, max_tokens)


def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str:
    """
    Return *text* shortened to ≈max_tok tokens by keeping the head & tail
@@ -140,13 +196,21 @@ def compress_prompt(
        return sum(_msg_tokens(m, enc) for m in msgs)

    original_token_count = total_tokens()

    if original_token_count + reserve <= target_tokens:
        return msgs

    # ---- STEP 0 : normalise content --------------------------------------
    # Convert non-string payloads to strings so token counting is coherent.
    for m in msgs[1:-1]:  # keep the first & last intact
    for i, m in enumerate(msgs):
        if not isinstance(m.get("content"), str) and m.get("content") is not None:
            if _is_tool_message(m):
                continue

            # Keep first and last messages intact (unless they're tool messages)
            if i == 0 or i == len(msgs) - 1:
                continue

            # Reasonable 20k-char ceiling prevents pathological blobs
            content_str = json.dumps(m["content"], separators=(",", ":"))
            if len(content_str) > 20_000:
@@ -157,34 +221,45 @@ def compress_prompt(
    cap = start_cap
    while total_tokens() + reserve > target_tokens and cap >= floor_cap:
        for m in msgs[1:-1]:  # keep first & last intact
            if _tok_len(m.get("content") or "", enc) > cap:
                m["content"] = _truncate_middle_tokens(m["content"], enc, cap)
            if _is_tool_message(m):
                # For tool messages, only truncate tool result content, preserve structure
                _truncate_tool_message_content(m, enc, cap)
                continue

            if _is_objective_message(m):
                # Never truncate objective messages - they contain the core task
                continue

            content = m.get("content") or ""
            if _tok_len(content, enc) > cap:
                m["content"] = _truncate_middle_tokens(content, enc, cap)
        cap //= 2  # tighten the screw

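Aside (illustrative): the head-and-tail idea behind _truncate_middle_tokens(), sketched on characters rather than tokens for brevity:

def truncate_middle_chars(text: str, max_chars: int) -> str:
    if len(text) <= max_chars:
        return text
    half = (max_chars - 5) // 2          # budget for the " ... " marker
    return text[:half] + " ... " + text[-half:]

print(truncate_middle_chars("x" * 100, 21))  # keeps 8 head + 8 tail chars
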
    # ---- STEP 2 : middle-out deletion -----------------------------------
    while total_tokens() + reserve > target_tokens and len(msgs) > 2:
        # Identify all deletable messages (not first/last, not tool messages, not objective messages)
        deletable_indices = []
        for i in range(1, len(msgs) - 1):  # Skip first and last
            if not _is_tool_message(msgs[i]) and not _is_objective_message(msgs[i]):
                deletable_indices.append(i)

        if not deletable_indices:
            break  # nothing more we can drop

        # Delete from center outward - find the index closest to center
        centre = len(msgs) // 2
        # Build a symmetrical centre-out index walk: centre, centre+1, centre-1, ...
        order = [centre] + [
            i
            for pair in zip(range(centre + 1, len(msgs) - 1), range(centre - 1, 0, -1))
            for i in pair
        ]
        removed = False
        for i in order:
            msg = msgs[i]
            if "tool_calls" in msg or msg.get("role") == "tool":
                continue  # protect tool shells
            del msgs[i]
            removed = True
            break
        if not removed:  # nothing more we can drop
            break
        to_delete = min(deletable_indices, key=lambda i: abs(i - centre))
        del msgs[to_delete]

    # ---- STEP 3 : final safety-net trim on first & last ------------------
    cap = start_cap
    while total_tokens() + reserve > target_tokens and cap >= floor_cap:
        for idx in (0, -1):  # first and last
            if _is_tool_message(msgs[idx]):
                # For tool messages at first/last position, truncate tool result content only
                _truncate_tool_message_content(msgs[idx], enc, cap)
                continue

            text = msgs[idx].get("content") or ""
            if _tok_len(text, enc) > cap:
                msgs[idx]["content"] = _truncate_middle_tokens(text, enc, cap)

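Aside (illustrative): the "closest to the centre" deletion order computed from deletable_indices in STEP 2 above:

msgs_len = 9
centre = msgs_len // 2                       # index 4
deletable = [1, 2, 3, 5, 6, 7]               # e.g. index 4 is a protected tool message
order = sorted(deletable, key=lambda i: abs(i - centre))
print(order)                                 # [3, 5, 2, 6, 1, 7] - centre outward
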
@@ -11,7 +11,13 @@ from urllib.parse import quote, urljoin, urlparse
import aiohttp
import idna
from aiohttp import FormData, abc
from tenacity import retry, retry_if_result, wait_exponential_jitter
from tenacity import (
    RetryCallState,
    retry,
    retry_if_result,
    stop_after_attempt,
    wait_exponential_jitter,
)

from backend.util.json import loads

@@ -285,6 +291,20 @@ class Response:
        return 200 <= self.status < 300


def _return_last_result(retry_state: RetryCallState) -> "Response":
    """
    Ensure the final attempt's response is returned when retrying stops.
    """
    if retry_state.outcome is None:
        raise RuntimeError("Retry state is missing an outcome.")

    exception = retry_state.outcome.exception()
    if exception is not None:
        raise exception

    return retry_state.outcome.result()


class Requests:
    """
    A wrapper around an aiohttp ClientSession that validates URLs before
@@ -299,6 +319,7 @@ class Requests:
        extra_url_validator: Callable[[URL], URL] | None = None,
        extra_headers: dict[str, str] | None = None,
        retry_max_wait: float = 300.0,
        retry_max_attempts: int | None = None,
    ):
        self.trusted_origins = []
        for url in trusted_origins or []:
@@ -311,6 +332,9 @@ class Requests:
        self.extra_url_validator = extra_url_validator
        self.extra_headers = extra_headers
        self.retry_max_wait = retry_max_wait
        if retry_max_attempts is not None and retry_max_attempts < 1:
            raise ValueError("retry_max_attempts must be None or >= 1")
        self.retry_max_attempts = retry_max_attempts

    async def request(
        self,
@@ -325,11 +349,17 @@ class Requests:
        max_redirects: int = 10,
        **kwargs,
    ) -> Response:
        @retry(
            wait=wait_exponential_jitter(max=self.retry_max_wait),
            retry=retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES),
            reraise=True,
        )
        retry_kwargs: dict[str, Any] = {
            "wait": wait_exponential_jitter(max=self.retry_max_wait),
            "retry": retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES),
            "reraise": True,
        }

        if self.retry_max_attempts is not None:
            retry_kwargs["stop"] = stop_after_attempt(self.retry_max_attempts)
            retry_kwargs["retry_error_callback"] = _return_last_result

        @retry(**retry_kwargs)
        async def _make_request() -> Response:
            return await self._request(
                method=method,

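Aside (hedged sketch of the tenacity behaviour relied on above): when the stop condition fires and retry_error_callback is set, the callback runs instead of raising RetryError, so the caller still receives the final throttled response:

from tenacity import RetryCallState, retry, retry_if_result, stop_after_attempt

def last_result(state: RetryCallState):
    assert state.outcome is not None
    return state.outcome.result()

@retry(
    retry=retry_if_result(lambda r: r == 429),   # pretend 429 means "throttled"
    stop=stop_after_attempt(3),
    retry_error_callback=last_result,
)
def fetch():
    return 429  # always throttled in this toy example

assert fetch() == 429  # three attempts, then the last result is returned
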
@@ -185,6 +185,12 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
        description="Number of top blocks with most errors to show when no blocks exceed threshold (0 to disable).",
    )

    # Execution Accuracy Monitoring
    execution_accuracy_check_interval_hours: int = Field(
        default=24,
        description="Interval in hours between execution accuracy alert checks.",
    )

    model_config = SettingsConfigDict(
        env_file=".env",
        extra="allow",

@@ -144,6 +144,8 @@ async def execute_block_test(block: Block):
        "execution_context": ExecutionContext(),
    }
    input_model = cast(type[BlockSchema], block.input_schema)

    # Handle regular credentials fields
    credentials_input_fields = input_model.get_credentials_fields()
    if len(credentials_input_fields) == 1 and isinstance(
        block.test_credentials, _BaseCredentials
@@ -158,6 +160,18 @@ async def execute_block_test(block: Block):
        if field_name in block.test_credentials:
            extra_exec_kwargs[field_name] = block.test_credentials[field_name]

    # Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
    auto_creds_fields = input_model.get_auto_credentials_fields()
    if auto_creds_fields and block.test_credentials:
        if isinstance(block.test_credentials, _BaseCredentials):
            # Single credentials object - use for all auto_credentials kwargs
            for kwarg_name in auto_creds_fields.keys():
                extra_exec_kwargs[kwarg_name] = block.test_credentials
        elif isinstance(block.test_credentials, dict):
            for kwarg_name in auto_creds_fields.keys():
                if kwarg_name in block.test_credentials:
                    extra_exec_kwargs[kwarg_name] = block.test_credentials[kwarg_name]

    for input_data in block.test_input:
        log.info(f"{prefix} in: {input_data}")

@@ -0,0 +1,64 @@
-- AlterTable
ALTER TABLE "StoreListingVersion" ADD COLUMN "agentOutputDemoUrl" TEXT;

-- Drop and recreate the StoreAgent view with agentOutputDemoUrl field
DROP VIEW IF EXISTS "StoreAgent";

CREATE OR REPLACE VIEW "StoreAgent" AS
WITH latest_versions AS (
    SELECT
        "storeListingId",
        MAX(version) AS max_version
    FROM "StoreListingVersion"
    WHERE "submissionStatus" = 'APPROVED'
    GROUP BY "storeListingId"
),
agent_versions AS (
    SELECT
        "storeListingId",
        array_agg(DISTINCT version::text ORDER BY version::text) AS versions
    FROM "StoreListingVersion"
    WHERE "submissionStatus" = 'APPROVED'
    GROUP BY "storeListingId"
)
SELECT
    sl.id AS listing_id,
    slv.id AS "storeListingVersionId",
    slv."createdAt" AS updated_at,
    sl.slug,
    COALESCE(slv.name, '') AS agent_name,
    slv."videoUrl" AS agent_video,
    slv."agentOutputDemoUrl" AS agent_output_demo,
    COALESCE(slv."imageUrls", ARRAY[]::text[]) AS agent_image,
    slv."isFeatured" AS featured,
    p.username AS creator_username, -- Allow NULL for malformed sub-agents
    p."avatarUrl" AS creator_avatar, -- Allow NULL for malformed sub-agents
    slv."subHeading" AS sub_heading,
    slv.description,
    slv.categories,
    slv.search,
    COALESCE(ar.run_count, 0::bigint) AS runs,
    COALESCE(rs.avg_rating, 0.0)::double precision AS rating,
    COALESCE(av.versions, ARRAY[slv.version::text]) AS versions,
    slv."isAvailable" AS is_available,
    COALESCE(sl."useForOnboarding", false) AS "useForOnboarding"
FROM "StoreListing" sl
JOIN latest_versions lv
    ON sl.id = lv."storeListingId"
JOIN "StoreListingVersion" slv
    ON slv."storeListingId" = lv."storeListingId"
    AND slv.version = lv.max_version
    AND slv."submissionStatus" = 'APPROVED'
JOIN "AgentGraph" a
    ON slv."agentGraphId" = a.id
    AND slv."agentGraphVersion" = a.version
LEFT JOIN "Profile" p
    ON sl."owningUserId" = p."userId"
LEFT JOIN "mv_review_stats" rs
    ON sl.id = rs."storeListingId"
LEFT JOIN "mv_agent_run_counts" ar
    ON a.id = ar."agentGraphId"
LEFT JOIN agent_versions av
    ON sl.id = av."storeListingId"
WHERE sl."isDeleted" = false
    AND sl."hasApprovedVersion" = true;
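Aside (hedged sketch; the connection setup is assumed): reading the recreated view through prisma-client-py's raw-query API, with column names as defined in the CREATE VIEW above:

from prisma import Prisma

async def demo_agents_with_output_demo(db: Prisma) -> list[dict]:
    # Returns rows from the view where the new agent_output_demo column is set.
    return await db.query_raw(
        'SELECT slug, agent_name, agent_output_demo '
        'FROM "StoreAgent" WHERE agent_output_demo IS NOT NULL LIMIT 5'
    )
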
@@ -0,0 +1,15 @@
-- Create BuilderSearchHistory table
CREATE TABLE "BuilderSearchHistory" (
    "id" TEXT NOT NULL,
    "userId" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "searchQuery" TEXT NOT NULL,
    "filter" TEXT[] DEFAULT ARRAY[]::TEXT[],
    "byCreator" TEXT[] DEFAULT ARRAY[]::TEXT[],

    CONSTRAINT "BuilderSearchHistory_pkey" PRIMARY KEY ("id")
);

-- Define User foreign relation
ALTER TABLE "BuilderSearchHistory" ADD CONSTRAINT "BuilderSearchHistory_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -114,6 +114,7 @@ cli = "backend.cli:main"
format = "linter:format"
lint = "linter:lint"
test = "run_tests:test"
load-store-agents = "test.load_store_agents:run"

[tool.isort]
profile = "black"

@@ -53,6 +53,7 @@ model User {

  Profile               Profile[]
  UserOnboarding        UserOnboarding?
  BuilderSearchHistory  BuilderSearchHistory[]
  StoreListings         StoreListing[]
  StoreListingReviews   StoreListingReview[]
  StoreVersionsReviewed StoreListingVersion[]
@@ -114,6 +115,19 @@ model UserOnboarding {
  User User @relation(fields: [userId], references: [id], onDelete: Cascade)
}

model BuilderSearchHistory {
  id        String   @id @default(uuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @default(now()) @updatedAt

  searchQuery String
  filter      String[] @default([])
  byCreator   String[] @default([])

  userId String
  User   User   @relation(fields: [userId], references: [id], onDelete: Cascade)
}

// This model describes the Agent Graph/Flow (Multi Agent System).
model AgentGraph {
  id String @default(uuid())
@@ -701,10 +715,11 @@ view StoreAgent {
  storeListingVersionId String
  updated_at            DateTime

  slug        String
  agent_name  String
  agent_video String?
  agent_image String[]
  slug              String
  agent_name        String
  agent_video       String?
  agent_output_demo String?
  agent_image       String[]

  featured         Boolean @default(false)
  creator_username String?
@@ -833,13 +848,14 @@ model StoreListingVersion {
  AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version])

  // Content fields
  name         String
  subHeading   String
  videoUrl     String?
  imageUrls    String[]
  description  String
  instructions String?
  categories   String[]
  name               String
  subHeading         String
  videoUrl           String?
  agentOutputDemoUrl String?
  imageUrls          String[]
  description        String
  instructions       String?
  categories         String[]

  isFeatured Boolean @default(false)


@@ -3,6 +3,7 @@
  "slug": "test-agent",
  "agent_name": "Test Agent",
  "agent_video": "video.mp4",
  "agent_output_demo": "demo.mp4",
  "agent_image": [
    "image1.jpg",
    "image2.jpg"

@@ -23,6 +23,7 @@
  "reviewed_at": null,
  "changes_summary": null,
  "video_url": "test.mp4",
  "agent_output_demo_url": null,
  "categories": [
    "test-category"
  ]

455
autogpt_platform/backend/test/load_store_agents.py
Normal file
@@ -0,0 +1,455 @@
"""
Load Store Agents Script

This script loads the exported store agents from the agents/ folder into the test database.
It creates:
- A user and profile for the 'autogpt' creator
- AgentGraph records from JSON files
- StoreListing and StoreListingVersion records from CSV metadata
- Approves agents that have is_available=true in the CSV

Usage:
    cd backend
    poetry run load-store-agents
"""

import asyncio
import csv
import json
import re
from datetime import datetime
from pathlib import Path

import prisma.enums
from prisma import Json, Prisma
from prisma.types import (
    AgentBlockCreateInput,
    AgentGraphCreateInput,
    AgentNodeCreateInput,
    AgentNodeLinkCreateInput,
    ProfileCreateInput,
    StoreListingCreateInput,
    StoreListingVersionCreateInput,
    UserCreateInput,
)

# Path to agents folder (relative to backend directory)
AGENTS_DIR = Path(__file__).parent.parent / "agents"
CSV_FILE = AGENTS_DIR / "StoreAgent_rows.csv"

# User constants for the autogpt creator (test data, not production)
# Fixed uuid4 for idempotency - same user is reused across script runs
AUTOGPT_USER_ID = "79d96c73-e6f5-4656-a83a-185b41ee0d06"
AUTOGPT_EMAIL = "autogpt-test@agpt.co"
AUTOGPT_USERNAME = "autogpt"


async def initialize_blocks(db: Prisma) -> set[str]:
    """Initialize agent blocks in the database from the registered blocks.

    Returns a set of block IDs that exist in the database.
    """
    from backend.data.block import get_blocks

    print(" Initializing agent blocks...")
    blocks = get_blocks()
    created_count = 0
    block_ids = set()

    for block_cls in blocks.values():
        block = block_cls()
        block_ids.add(block.id)
        existing_block = await db.agentblock.find_first(
            where={"OR": [{"id": block.id}, {"name": block.name}]}
        )
        if not existing_block:
            await db.agentblock.create(
                data=AgentBlockCreateInput(
                    id=block.id,
                    name=block.name,
                    inputSchema=json.dumps(block.input_schema.jsonschema()),
                    outputSchema=json.dumps(block.output_schema.jsonschema()),
                )
            )
            created_count += 1
        elif block.id != existing_block.id or block.name != existing_block.name:
            await db.agentblock.update(
                where={"id": existing_block.id},
                data={
                    "id": block.id,
                    "name": block.name,
                    "inputSchema": json.dumps(block.input_schema.jsonschema()),
                    "outputSchema": json.dumps(block.output_schema.jsonschema()),
                },
            )

    print(f" Initialized {len(blocks)} blocks ({created_count} new)")
    return block_ids


async def ensure_block_exists(
    db: Prisma, block_id: str, known_blocks: set[str]
) -> bool:
    """Ensure a block exists in the database, create a placeholder if needed.

    Returns True if the block exists (or was created), False otherwise.
    """
    if block_id in known_blocks:
        return True

    # Check if it already exists in the database
    existing = await db.agentblock.find_unique(where={"id": block_id})
    if existing:
        known_blocks.add(block_id)
        return True

    # Create a placeholder block
    print(f" Creating placeholder block: {block_id}")
    try:
        await db.agentblock.create(
            data=AgentBlockCreateInput(
                id=block_id,
                name=f"Placeholder_{block_id[:8]}",
                inputSchema="{}",
                outputSchema="{}",
            )
        )
        known_blocks.add(block_id)
        return True
    except Exception as e:
        print(f" Warning: Could not create placeholder block {block_id}: {e}")
        return False


def parse_image_urls(image_str: str) -> list[str]:
    """Parse the image URLs from CSV format like ["url1","url2"]."""
    if not image_str or image_str == "[]":
        return []
    try:
        return json.loads(image_str)
    except json.JSONDecodeError:
        return []


def parse_categories(categories_str: str) -> list[str]:
    """Parse categories from CSV format like ["cat1","cat2"]."""
    if not categories_str or categories_str == "[]":
        return []
    try:
        return json.loads(categories_str)
    except json.JSONDecodeError:
        return []


def sanitize_slug(slug: str) -> str:
    """Ensure slug only contains valid characters."""
    return re.sub(r"[^a-z0-9-]", "", slug.lower())

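Aside (illustrative): what the CSV helpers above produce for typical and malformed cells:

assert parse_categories('["creative"]') == ["creative"]
assert parse_image_urls("[]") == []
assert parse_image_urls("not json") == []  # malformed cells degrade to []
assert sanitize_slug("flux-ai-image-generator") == "flux-ai-image-generator"
assert sanitize_slug("Flux AI Image Generator!") == "fluxaiimagegenerator"
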
async def create_user_and_profile(db: Prisma) -> None:
    """Create the autogpt user and profile if they don't exist."""
    # Check if user exists
    existing_user = await db.user.find_unique(where={"id": AUTOGPT_USER_ID})
    if existing_user:
        print(f"User {AUTOGPT_USER_ID} already exists, skipping user creation")
    else:
        print(f"Creating user {AUTOGPT_USER_ID}")
        await db.user.create(
            data=UserCreateInput(
                id=AUTOGPT_USER_ID,
                email=AUTOGPT_EMAIL,
                name="AutoGPT",
                metadata=Json({}),
                integrations="",
            )
        )

    # Check if profile exists
    existing_profile = await db.profile.find_first(where={"userId": AUTOGPT_USER_ID})
    if existing_profile:
        print(
            f"Profile for user {AUTOGPT_USER_ID} already exists, skipping profile creation"
        )
    else:
        print(f"Creating profile for user {AUTOGPT_USER_ID}")
        await db.profile.create(
            data=ProfileCreateInput(
                userId=AUTOGPT_USER_ID,
                name="AutoGPT",
                username=AUTOGPT_USERNAME,
                description="Official AutoGPT agents and templates",
                links=["https://agpt.co"],
                avatarUrl="https://storage.googleapis.com/agpt-prod-website-artifacts/users/b3e41ea4-2f4c-4964-927c-fe682d857bad/images/4b5781a6-49e1-433c-9a75-65af1be5c02d.png",
            )
        )


async def load_csv_metadata() -> dict[str, dict]:
    """Load CSV metadata and return a dict keyed by storeListingVersionId."""
    metadata = {}
    with open(CSV_FILE, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            version_id = row["storeListingVersionId"]
            metadata[version_id] = {
                "listing_id": row["listing_id"],
                "store_listing_version_id": version_id,
                "slug": sanitize_slug(row["slug"]),
                "agent_name": row["agent_name"],
                "agent_video": row["agent_video"] if row["agent_video"] else None,
                "agent_image": parse_image_urls(row["agent_image"]),
                "featured": row["featured"].lower() == "true",
                "sub_heading": row["sub_heading"],
                "description": row["description"],
                "categories": parse_categories(row["categories"]),
                "use_for_onboarding": row["useForOnboarding"].lower() == "true",
                "is_available": row["is_available"].lower() == "true",
            }
    return metadata


async def load_agent_json(json_path: Path) -> dict:
    """Load and parse an agent JSON file."""
    with open(json_path, "r", encoding="utf-8") as f:
        return json.load(f)


async def create_agent_graph(
    db: Prisma, agent_data: dict, known_blocks: set[str]
) -> tuple[str, int]:
    """Create an AgentGraph and its nodes/links from JSON data."""
    graph_id = agent_data["id"]
    version = agent_data.get("version", 1)

    # Check if graph already exists
    existing_graph = await db.agentgraph.find_unique(
        where={"graphVersionId": {"id": graph_id, "version": version}}
    )
    if existing_graph:
        print(f" Graph {graph_id} v{version} already exists, skipping")
        return graph_id, version

    print(
        f" Creating graph {graph_id} v{version}: {agent_data.get('name', 'Unnamed')}"
    )

    # Create the main graph
    await db.agentgraph.create(
        data=AgentGraphCreateInput(
            id=graph_id,
            version=version,
            name=agent_data.get("name"),
            description=agent_data.get("description"),
            instructions=agent_data.get("instructions"),
            recommendedScheduleCron=agent_data.get("recommended_schedule_cron"),
            isActive=agent_data.get("is_active", True),
            userId=AUTOGPT_USER_ID,
            forkedFromId=agent_data.get("forked_from_id"),
            forkedFromVersion=agent_data.get("forked_from_version"),
        )
    )

    # Create nodes
    nodes = agent_data.get("nodes", [])
    for node in nodes:
        block_id = node["block_id"]
        # Ensure the block exists (create placeholder if needed)
        block_exists = await ensure_block_exists(db, block_id, known_blocks)
        if not block_exists:
            print(
                f" Skipping node {node['id']} - block {block_id} could not be created"
            )
            continue

        await db.agentnode.create(
            data=AgentNodeCreateInput(
                id=node["id"],
                agentBlockId=block_id,
                agentGraphId=graph_id,
                agentGraphVersion=version,
                constantInput=Json(node.get("input_default", {})),
                metadata=Json(node.get("metadata", {})),
            )
        )

    # Create links
    links = agent_data.get("links", [])
    for link in links:
        await db.agentnodelink.create(
            data=AgentNodeLinkCreateInput(
                id=link["id"],
                agentNodeSourceId=link["source_id"],
                agentNodeSinkId=link["sink_id"],
                sourceName=link["source_name"],
                sinkName=link["sink_name"],
                isStatic=link.get("is_static", False),
            )
        )

    # Handle sub_graphs recursively
    sub_graphs = agent_data.get("sub_graphs", [])
    for sub_graph in sub_graphs:
        await create_agent_graph(db, sub_graph, known_blocks)

    return graph_id, version


async def create_store_listing(
|
||||
db: Prisma,
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
metadata: dict,
|
||||
) -> None:
|
||||
"""Create StoreListing and StoreListingVersion for an agent."""
|
||||
listing_id = metadata["listing_id"]
|
||||
version_id = metadata["store_listing_version_id"]
|
||||
|
||||
# Check if listing already exists
|
||||
existing_listing = await db.storelisting.find_unique(where={"id": listing_id})
|
||||
if existing_listing:
|
||||
print(f" Store listing {listing_id} already exists, skipping")
|
||||
return
|
||||
|
||||
print(f" Creating store listing: {metadata['agent_name']}")
|
||||
|
||||
# Determine if this should be approved
|
||||
is_approved = metadata["is_available"]
|
||||
submission_status = (
|
||||
prisma.enums.SubmissionStatus.APPROVED
|
||||
if is_approved
|
||||
else prisma.enums.SubmissionStatus.PENDING
|
||||
)
|
||||
|
||||
# Create the store listing first (without activeVersionId - will update after)
|
||||
await db.storelisting.create(
|
||||
data=StoreListingCreateInput(
|
||||
id=listing_id,
|
||||
slug=metadata["slug"],
|
||||
agentGraphId=graph_id,
|
||||
agentGraphVersion=graph_version,
|
||||
owningUserId=AUTOGPT_USER_ID,
|
||||
hasApprovedVersion=is_approved,
|
||||
useForOnboarding=metadata["use_for_onboarding"],
|
||||
)
|
||||
)
|
||||
|
||||
# Create the store listing version
|
||||
await db.storelistingversion.create(
|
||||
data=StoreListingVersionCreateInput(
|
||||
id=version_id,
|
||||
version=1,
|
||||
agentGraphId=graph_id,
|
||||
agentGraphVersion=graph_version,
|
||||
name=metadata["agent_name"],
|
||||
subHeading=metadata["sub_heading"],
|
||||
videoUrl=metadata["agent_video"],
|
||||
imageUrls=metadata["agent_image"],
|
||||
description=metadata["description"],
|
||||
categories=metadata["categories"],
|
||||
isFeatured=metadata["featured"],
|
||||
isAvailable=metadata["is_available"],
|
||||
submissionStatus=submission_status,
|
||||
submittedAt=datetime.now() if is_approved else None,
|
||||
reviewedAt=datetime.now() if is_approved else None,
|
||||
storeListingId=listing_id,
|
||||
)
|
||||
)
|
||||
|
||||
# Update the store listing with the active version if approved
|
||||
if is_approved:
|
||||
await db.storelisting.update(
|
||||
where={"id": listing_id},
|
||||
data={"ActiveVersion": {"connect": {"id": version_id}}},
|
||||
)
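    # (Assumption, not stated in the code above: the create-then-connect two-step
    # is needed because StoreListing.ActiveVersion references a StoreListingVersion
    # row, so both rows must exist before the relation can be linked.)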


async def main():
    """Main function to load all store agents."""
    print("=" * 60)
    print("Loading Store Agents into Test Database")
    print("=" * 60)

    db = Prisma()
    await db.connect()

    try:
        # Step 0: Initialize agent blocks
        print("\n[Step 0] Initializing agent blocks...")
        known_blocks = await initialize_blocks(db)

        # Step 1: Create user and profile
        print("\n[Step 1] Creating user and profile...")
        await create_user_and_profile(db)

        # Step 2: Load CSV metadata
        print("\n[Step 2] Loading CSV metadata...")
        csv_metadata = await load_csv_metadata()
        print(f" Found {len(csv_metadata)} store listing entries in CSV")

        # Step 3: Find all JSON files and match with CSV
        print("\n[Step 3] Processing agent JSON files...")
        json_files = list(AGENTS_DIR.glob("agent_*.json"))
        print(f" Found {len(json_files)} agent JSON files")

        # Build mapping from version_id to json file
        loaded_graphs = {}  # graph_id -> (graph_id, version)
        failed_agents = []

        for json_file in json_files:
            # Extract the version ID from filename (agent_<version_id>.json)
            version_id = json_file.stem.replace("agent_", "")

            if version_id not in csv_metadata:
                print(
                    f" Warning: {json_file.name} not found in CSV metadata, skipping"
                )
                continue

            metadata = csv_metadata[version_id]
            agent_name = metadata["agent_name"]
            print(f"\nProcessing: {agent_name}")

            # Use a transaction per agent to prevent dangling resources
            try:
                async with db.tx() as tx:
                    # Load and create the agent graph
                    agent_data = await load_agent_json(json_file)
                    graph_id, graph_version = await create_agent_graph(
                        tx, agent_data, known_blocks
                    )
                    loaded_graphs[graph_id] = (graph_id, graph_version)

                    # Create store listing
                    await create_store_listing(tx, graph_id, graph_version, metadata)
            except Exception as e:
                print(f" Error loading agent '{agent_name}': {e}")
                failed_agents.append(agent_name)
                continue

        # Step 4: Refresh materialized views
        print("\n[Step 4] Refreshing materialized views...")
        try:
            await db.execute_raw("SELECT refresh_store_materialized_views();")
            print(" Materialized views refreshed successfully")
        except Exception as e:
            print(f" Warning: Could not refresh materialized views: {e}")

        print("\n" + "=" * 60)
        print(f"Successfully loaded {len(loaded_graphs)} agents")
        if failed_agents:
            print(
                f"Failed to load {len(failed_agents)} agents: {', '.join(failed_agents)}"
            )
        print("=" * 60)

    finally:
        await db.disconnect()


def run():
    """Entry point for poetry script."""
    asyncio.run(main())
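
# A minimal sketch of the assumed Poetry wiring for the entry point above; the
# actual [tool.poetry.scripts] entry lives in pyproject.toml and is not shown in
# this diff, so the module path below is illustrative only:
#
#   [tool.poetry.scripts]
#   load-store-agents = "test.load_store_agents:run"
#
# which is what `poetry run load-store-agents` (and the make target) invokes.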


if __name__ == "__main__":
    run()
@@ -3,6 +3,14 @@ import { withSentryConfig } from "@sentry/nextjs";
/** @type {import('next').NextConfig} */
const nextConfig = {
  productionBrowserSourceMaps: true,
  experimental: {
    serverActions: {
      bodySizeLimit: "256mb",
    },
    // Increase body size limit for API routes (file uploads) - 256MB to match backend limit
    proxyClientMaxBodySize: "256mb",
    middlewareClientMaxBodySize: "256mb",
  },
  images: {
    domains: [
      // We dont need to maintain alphabetical order here

@@ -72,6 +72,7 @@
    "dotenv": "17.2.3",
    "elliptic": "6.6.1",
    "embla-carousel-react": "8.6.0",
    "flatbush": "4.5.0",
    "framer-motion": "12.23.24",
    "geist": "1.5.1",
    "highlight.js": "11.11.1",
@@ -81,7 +82,7 @@
    "lodash": "4.17.21",
    "lucide-react": "0.552.0",
    "moment": "2.30.1",
    "next": "15.4.7",
    "next": "15.4.10",
    "next-themes": "0.4.6",
    "nuqs": "2.7.2",
    "party-js": "2.2.0",
@@ -136,9 +137,8 @@
    "concurrently": "9.2.1",
    "cross-env": "10.1.0",
    "eslint": "8.57.1",
    "eslint-config-next": "15.5.2",
    "eslint-config-next": "15.5.7",
    "eslint-plugin-storybook": "9.1.5",
    "import-in-the-middle": "1.14.2",
    "msw": "2.11.6",
    "msw-storybook-addon": "2.0.6",
    "orval": "7.13.0",

429
autogpt_platform/frontend/pnpm-lock.yaml
generated
@@ -16,7 +16,7 @@ importers:
        version: 5.2.2(react-hook-form@7.66.0(react@18.3.1))
      '@next/third-parties':
        specifier: 15.4.6
        version: 15.4.6(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
        version: 15.4.6(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
      '@phosphor-icons/react':
        specifier: 2.1.10
        version: 2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
@@ -88,7 +88,7 @@ importers:
        version: 5.24.13(@rjsf/utils@5.24.13(react@18.3.1))
      '@sentry/nextjs':
        specifier: 10.27.0
        version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))
        version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))
      '@supabase/ssr':
        specifier: 0.7.0
        version: 0.7.0(@supabase/supabase-js@2.78.0)
@@ -106,10 +106,10 @@ importers:
        version: 0.2.4
      '@vercel/analytics':
        specifier: 1.5.0
        version: 1.5.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
        version: 1.5.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
      '@vercel/speed-insights':
        specifier: 1.2.0
        version: 1.2.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
        version: 1.2.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
      '@xyflow/react':
        specifier: 12.9.2
        version: 12.9.2(@types/react@18.3.17)(immer@10.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
@@ -140,12 +140,15 @@ importers:
      embla-carousel-react:
        specifier: 8.6.0
        version: 8.6.0(react@18.3.1)
      flatbush:
        specifier: 4.5.0
        version: 4.5.0
      framer-motion:
        specifier: 12.23.24
        version: 12.23.24(@emotion/is-prop-valid@1.2.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      geist:
        specifier: 1.5.1
        version: 1.5.1(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))
        version: 1.5.1(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))
      highlight.js:
        specifier: 11.11.1
        version: 11.11.1
@@ -168,14 +171,14 @@ importers:
        specifier: 2.30.1
        version: 2.30.1
      next:
        specifier: 15.4.7
        version: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
        specifier: 15.4.10
        version: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next-themes:
        specifier: 0.4.6
        version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      nuqs:
        specifier: 2.7.2
        version: 2.7.2(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
        version: 2.7.2(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
      party-js:
        specifier: 2.2.0
        version: 2.2.0
@@ -281,7 +284,7 @@ importers:
        version: 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))
      '@storybook/nextjs':
        specifier: 9.1.5
        version: 9.1.5(esbuild@0.25.9)(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))
        version: 9.1.5(esbuild@0.25.9)(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))
      '@tanstack/eslint-plugin-query':
        specifier: 5.91.2
        version: 5.91.2(eslint@8.57.1)(typescript@5.9.3)
@@ -328,14 +331,11 @@ importers:
        specifier: 8.57.1
        version: 8.57.1
      eslint-config-next:
        specifier: 15.5.2
        version: 15.5.2(eslint@8.57.1)(typescript@5.9.3)
        specifier: 15.5.7
        version: 15.5.7(eslint@8.57.1)(typescript@5.9.3)
      eslint-plugin-storybook:
        specifier: 9.1.5
        version: 9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3)
      import-in-the-middle:
        specifier: 1.14.2
        version: 1.14.2
      msw:
        specifier: 2.11.6
        version: 2.11.6(@types/node@24.10.0)(typescript@5.9.3)
@@ -983,12 +983,15 @@ packages:
  '@date-fns/tz@1.4.1':
    resolution: {integrity: sha512-P5LUNhtbj6YfI3iJjw5EL9eUAG6OitD0W3fWQcpQjDRc/QIsL0tRNuO1PcDvPccWL1fSTXXdE1ds+l95DV/OFA==}

  '@emnapi/core@1.5.0':
    resolution: {integrity: sha512-sbP8GzB1WDzacS8fgNPpHlp6C9VZe+SJP3F90W9rLemaQj2PzIuTEl1qDOYQf58YIpyjViI24y9aPWCjEzY2cg==}
  '@emnapi/core@1.7.1':
    resolution: {integrity: sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==}

  '@emnapi/runtime@1.5.0':
    resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==}

  '@emnapi/runtime@1.7.1':
    resolution: {integrity: sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==}

  '@emnapi/wasi-threads@1.1.0':
    resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==}

@@ -1326,6 +1329,10 @@ packages:
    resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==}
    engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0}

  '@eslint-community/regexpp@4.12.2':
    resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==}
    engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0}

  '@eslint/eslintrc@2.1.4':
    resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==}
    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
@@ -1599,56 +1606,56 @@ packages:
  '@neoconfetti/react@1.0.0':
    resolution: {integrity: sha512-klcSooChXXOzIm+SE5IISIAn3bYzYfPjbX7D7HoqZL84oAfgREeSg5vSIaSFH+DaGzzvImTyWe1OyrJ67vik4A==}

  '@next/env@15.4.7':
    resolution: {integrity: sha512-PrBIpO8oljZGTOe9HH0miix1w5MUiGJ/q83Jge03mHEE0E3pyqzAy2+l5G6aJDbXoobmxPJTVhbCuwlLtjSHwg==}
  '@next/env@15.4.10':
    resolution: {integrity: sha512-knhmoJ0Vv7VRf6pZEPSnciUG1S4bIhWx+qTYBW/AjxEtlzsiNORPk8sFDCEvqLfmKuey56UB9FL1UdHEV3uBrg==}

  '@next/eslint-plugin-next@15.5.2':
    resolution: {integrity: sha512-lkLrRVxcftuOsJNhWatf1P2hNVfh98k/omQHrCEPPriUypR6RcS13IvLdIrEvkm9AH2Nu2YpR5vLqBuy6twH3Q==}
  '@next/eslint-plugin-next@15.5.7':
    resolution: {integrity: sha512-DtRU2N7BkGr8r+pExfuWHwMEPX5SD57FeA6pxdgCHODo+b/UgIgjE+rgWKtJAbEbGhVZ2jtHn4g3wNhWFoNBQQ==}

  '@next/swc-darwin-arm64@15.4.7':
    resolution: {integrity: sha512-2Dkb+VUTp9kHHkSqtws4fDl2Oxms29HcZBwFIda1X7Ztudzy7M6XF9HDS2dq85TmdN47VpuhjE+i6wgnIboVzQ==}
  '@next/swc-darwin-arm64@15.4.8':
    resolution: {integrity: sha512-Pf6zXp7yyQEn7sqMxur6+kYcywx5up1J849psyET7/8pG2gQTVMjU3NzgIt8SeEP5to3If/SaWmaA6H6ysBr1A==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [darwin]

  '@next/swc-darwin-x64@15.4.7':
    resolution: {integrity: sha512-qaMnEozKdWezlmh1OGDVFueFv2z9lWTcLvt7e39QA3YOvZHNpN2rLs/IQLwZaUiw2jSvxW07LxMCWtOqsWFNQg==}
  '@next/swc-darwin-x64@15.4.8':
    resolution: {integrity: sha512-xla6AOfz68a6kq3gRQccWEvFC/VRGJmA/QuSLENSO7CZX5WIEkSz7r1FdXUjtGCQ1c2M+ndUAH7opdfLK1PQbw==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [darwin]

  '@next/swc-linux-arm64-gnu@15.4.7':
    resolution: {integrity: sha512-ny7lODPE7a15Qms8LZiN9wjNWIeI+iAZOFDOnv2pcHStncUr7cr9lD5XF81mdhrBXLUP9yT9RzlmSWKIazWoDw==}
  '@next/swc-linux-arm64-gnu@15.4.8':
    resolution: {integrity: sha512-y3fmp+1Px/SJD+5ntve5QLZnGLycsxsVPkTzAc3zUiXYSOlTPqT8ynfmt6tt4fSo1tAhDPmryXpYKEAcoAPDJw==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [linux]

  '@next/swc-linux-arm64-musl@15.4.7':
    resolution: {integrity: sha512-4SaCjlFR/2hGJqZLLWycccy1t+wBrE/vyJWnYaZJhUVHccpGLG5q0C+Xkw4iRzUIkE+/dr90MJRUym3s1+vO8A==}
  '@next/swc-linux-arm64-musl@15.4.8':
    resolution: {integrity: sha512-DX/L8VHzrr1CfwaVjBQr3GWCqNNFgyWJbeQ10Lx/phzbQo3JNAxUok1DZ8JHRGcL6PgMRgj6HylnLNndxn4Z6A==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [linux]

  '@next/swc-linux-x64-gnu@15.4.7':
    resolution: {integrity: sha512-2uNXjxvONyRidg00VwvlTYDwC9EgCGNzPAPYbttIATZRxmOZ3hllk/YYESzHZb65eyZfBR5g9xgCZjRAl9YYGg==}
  '@next/swc-linux-x64-gnu@15.4.8':
    resolution: {integrity: sha512-9fLAAXKAL3xEIFdKdzG5rUSvSiZTLLTCc6JKq1z04DR4zY7DbAPcRvNm3K1inVhTiQCs19ZRAgUerHiVKMZZIA==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [linux]

  '@next/swc-linux-x64-musl@15.4.7':
    resolution: {integrity: sha512-ceNbPjsFgLscYNGKSu4I6LYaadq2B8tcK116nVuInpHHdAWLWSwVK6CHNvCi0wVS9+TTArIFKJGsEyVD1H+4Kg==}
  '@next/swc-linux-x64-musl@15.4.8':
    resolution: {integrity: sha512-s45V7nfb5g7dbS7JK6XZDcapicVrMMvX2uYgOHP16QuKH/JA285oy6HcxlKqwUNaFY/UC6EvQ8QZUOo19cBKSA==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [linux]

  '@next/swc-win32-arm64-msvc@15.4.7':
    resolution: {integrity: sha512-pZyxmY1iHlZJ04LUL7Css8bNvsYAMYOY9JRwFA3HZgpaNKsJSowD09Vg2R9734GxAcLJc2KDQHSCR91uD6/AAw==}
  '@next/swc-win32-arm64-msvc@15.4.8':
    resolution: {integrity: sha512-KjgeQyOAq7t/HzAJcWPGA8X+4WY03uSCZ2Ekk98S9OgCFsb6lfBE3dbUzUuEQAN2THbwYgFfxX2yFTCMm8Kehw==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [win32]

  '@next/swc-win32-x64-msvc@15.4.7':
    resolution: {integrity: sha512-HjuwPJ7BeRzgl3KrjKqD2iDng0eQIpIReyhpF5r4yeAHFwWRuAhfW92rWv/r3qeQHEwHsLRzFDvMqRjyM5DI6A==}
  '@next/swc-win32-x64-msvc@15.4.8':
    resolution: {integrity: sha512-Exsmf/+42fWVnLMaZHzshukTBxZrSwuuLKFvqhGHJ+mC1AokqieLY/XzAl3jc/CqhXLqLY3RRjkKJ9YnLPcRWg==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [win32]
@@ -2619,8 +2626,8 @@ packages:
  '@rtsao/scc@1.1.0':
    resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==}

  '@rushstack/eslint-patch@1.12.0':
    resolution: {integrity: sha512-5EwMtOqvJMMa3HbmxLlF74e+3/HhwBTMcvt3nqVJgGCozO6hzIPOBlwm8mGVNR9SN2IJpxSnlxczyDjcn7qIyw==}
  '@rushstack/eslint-patch@1.15.0':
    resolution: {integrity: sha512-ojSshQPKwVvSMR8yT2L/QtUkV5SXi/IfDiJ4/8d6UbTPjiHVmxZzUAzGD8Tzks1b9+qQkZa0isUOvYObedITaw==}

  '@scarf/scarf@1.4.0':
    resolution: {integrity: sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==}
@@ -3094,8 +3101,8 @@ packages:
    peerDependencies:
      '@testing-library/dom': '>=7.21.4'

  '@tybys/wasm-util@0.10.0':
    resolution: {integrity: sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ==}
  '@tybys/wasm-util@0.10.1':
    resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==}

  '@types/aria-query@5.0.4':
    resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==}
@@ -3285,16 +3292,16 @@ packages:
  '@types/ws@8.18.1':
    resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==}

  '@typescript-eslint/eslint-plugin@8.43.0':
    resolution: {integrity: sha512-8tg+gt7ENL7KewsKMKDHXR1vm8tt9eMxjJBYINf6swonlWgkYn5NwyIgXpbbDxTNU5DgpDFfj95prcTq2clIQQ==}
  '@typescript-eslint/eslint-plugin@8.48.1':
    resolution: {integrity: sha512-X63hI1bxl5ohelzr0LY5coufyl0LJNthld+abwxpCoo6Gq+hSqhKwci7MUWkXo67mzgUK6YFByhmaHmUcuBJmA==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
    peerDependencies:
      '@typescript-eslint/parser': ^8.43.0
      '@typescript-eslint/parser': ^8.48.1
      eslint: ^8.57.0 || ^9.0.0
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/parser@8.43.0':
    resolution: {integrity: sha512-B7RIQiTsCBBmY+yW4+ILd6mF5h1FUwJsVvpqkrgpszYifetQ2Ke+Z4u6aZh0CblkUGIdR59iYVyXqqZGkZ3aBw==}
  '@typescript-eslint/parser@8.48.1':
    resolution: {integrity: sha512-PC0PDZfJg8sP7cmKe6L3QIL8GZwU5aRvUFedqSIpw3B+QjRSUZeeITC2M5XKeMXEzL6wccN196iy3JLwKNvDVA==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
    peerDependencies:
      eslint: ^8.57.0 || ^9.0.0
@@ -3312,6 +3319,12 @@ packages:
    peerDependencies:
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/project-service@8.48.1':
    resolution: {integrity: sha512-HQWSicah4s9z2/HifRPQ6b6R7G+SBx64JlFQpgSSHWPKdvCZX57XCbszg/bapbRsOEv42q5tayTYcEFpACcX1w==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
    peerDependencies:
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/scope-manager@8.43.0':
    resolution: {integrity: sha512-daSWlQ87ZhsjrbMLvpuuMAt3y4ba57AuvadcR7f3nl8eS3BjRc8L9VLxFLk92RL5xdXOg6IQ+qKjjqNEimGuAg==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3320,6 +3333,10 @@ packages:
    resolution: {integrity: sha512-LF4b/NmGvdWEHD2H4MsHD8ny6JpiVNDzrSZr3CsckEgCbAGZbYM4Cqxvi9L+WqDMT+51Ozy7lt2M+d0JLEuBqA==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}

  '@typescript-eslint/scope-manager@8.48.1':
    resolution: {integrity: sha512-rj4vWQsytQbLxC5Bf4XwZ0/CKd362DkWMUkviT7DCS057SK64D5lH74sSGzhI6PDD2HCEq02xAP9cX68dYyg1w==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}

  '@typescript-eslint/tsconfig-utils@8.43.0':
    resolution: {integrity: sha512-ALC2prjZcj2YqqL5X/bwWQmHA2em6/94GcbB/KKu5SX3EBDOsqztmmX1kMkvAJHzxk7TazKzJfFiEIagNV3qEA==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3332,8 +3349,14 @@ packages:
    peerDependencies:
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/type-utils@8.43.0':
    resolution: {integrity: sha512-qaH1uLBpBuBBuRf8c1mLJ6swOfzCXryhKND04Igr4pckzSEW9JX5Aw9AgW00kwfjWJF0kk0ps9ExKTfvXfw4Qg==}
  '@typescript-eslint/tsconfig-utils@8.48.1':
    resolution: {integrity: sha512-k0Jhs4CpEffIBm6wPaCXBAD7jxBtrHjrSgtfCjUvPp9AZ78lXKdTR8fxyZO5y4vWNlOvYXRtngSZNSn+H53Jkw==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
    peerDependencies:
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/type-utils@8.48.1':
    resolution: {integrity: sha512-1jEop81a3LrJQLTf/1VfPQdhIY4PlGDBc/i67EVWObrtvcziysbLN3oReexHOM6N3jyXgCrkBsZpqwH0hiDOQg==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
    peerDependencies:
      eslint: ^8.57.0 || ^9.0.0
@@ -3347,6 +3370,10 @@ packages:
    resolution: {integrity: sha512-lNCWCbq7rpg7qDsQrd3D6NyWYu+gkTENkG5IKYhUIcxSb59SQC/hEQ+MrG4sTgBVghTonNWq42bA/d4yYumldQ==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}

  '@typescript-eslint/types@8.48.1':
    resolution: {integrity: sha512-+fZ3LZNeiELGmimrujsDCT4CRIbq5oXdHe7chLiW8qzqyPMnn1puNstCrMNVAqwcl2FdIxkuJ4tOs/RFDBVc/Q==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}

  '@typescript-eslint/typescript-estree@8.43.0':
    resolution: {integrity: sha512-7Vv6zlAhPb+cvEpP06WXXy/ZByph9iL6BQRBDj4kmBsW98AqEeQHlj/13X+sZOrKSo9/rNKH4Ul4f6EICREFdw==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3359,6 +3386,12 @@ packages:
    peerDependencies:
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/typescript-estree@8.48.1':
    resolution: {integrity: sha512-/9wQ4PqaefTK6POVTjJaYS0bynCgzh6ClJHGSBj06XEHjkfylzB+A3qvyaXnErEZSaxhIo4YdyBgq6j4RysxDg==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
    peerDependencies:
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/utils@8.43.0':
    resolution: {integrity: sha512-S1/tEmkUeeswxd0GGcnwuVQPFWo8NzZTOMxCvw8BX7OMxnNae+i8Tm7REQen/SwUIPoPqfKn7EaZ+YLpiB3k9g==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3373,6 +3406,13 @@ packages:
      eslint: ^8.57.0 || ^9.0.0
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/utils@8.48.1':
    resolution: {integrity: sha512-fAnhLrDjiVfey5wwFRwrweyRlCmdz5ZxXz2G/4cLn0YDLjTapmN4gcCsTBR1N2rWnZSDeWpYtgLDsJt+FpmcwA==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
    peerDependencies:
      eslint: ^8.57.0 || ^9.0.0
      typescript: '>=4.8.4 <6.0.0'

  '@typescript-eslint/visitor-keys@8.43.0':
    resolution: {integrity: sha512-T+S1KqRD4sg/bHfLwrpF/K3gQLBM1n7Rp7OjjikjTEssI2YJzQpi5WXoynOaQ93ERIuq3O8RBTOUYDKszUCEHw==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3381,6 +3421,10 @@ packages:
    resolution: {integrity: sha512-tUFMXI4gxzzMXt4xpGJEsBsTox0XbNQ1y94EwlD/CuZwFcQP79xfQqMhau9HsRc/J0cAPA/HZt1dZPtGn9V/7w==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}

  '@typescript-eslint/visitor-keys@8.48.1':
    resolution: {integrity: sha512-BmxxndzEWhE4TIEEMBs8lP3MBWN3jFPs/p6gPm/wkv02o41hI6cq9AuSmGAaTTHPtA1FTi2jBre4A9rm5ZmX+Q==}
    engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}

  '@ungap/structured-clone@1.3.0':
    resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==}

@@ -4582,8 +4626,8 @@ packages:
    resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==}
    engines: {node: '>=12'}

  eslint-config-next@15.5.2:
    resolution: {integrity: sha512-3hPZghsLupMxxZ2ggjIIrat/bPniM2yRpsVPVM40rp8ZMzKWOJp2CGWn7+EzoV2ddkUr5fxNfHpF+wU1hGt/3g==}
  eslint-config-next@15.5.7:
    resolution: {integrity: sha512-nU/TRGHHeG81NeLW5DeQT5t6BDUqbpsNQTvef1ld/tqHT+/zTx60/TIhKnmPISTTe++DVo+DLxDmk4rnwHaZVw==}
    peerDependencies:
      eslint: ^7.23.0 || ^8.0.0 || ^9.0.0
      typescript: '>=3.3.1'
@@ -4835,6 +4879,12 @@ packages:
    resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==}
    engines: {node: ^10.12.0 || >=12.0.0}

  flatbush@4.5.0:
    resolution: {integrity: sha512-K7JSilGr4lySRLdJqKY45fu0m/dIs6YAAu/ESqdMsnW3pI0m3gpa6oRc6NDXW161Ov9+rIQjsuyOt5ObdIfgwg==}

  flatqueue@3.0.0:
    resolution: {integrity: sha512-y1deYaVt+lIc/d2uIcWDNd0CrdQTO5xoCjeFdhX0kSXvm2Acm0o+3bAOiYklTEoRyzwio3sv3/IiBZdusbAe2Q==}

  flatted@3.3.3:
    resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==}

@@ -4909,6 +4959,10 @@ packages:
    peerDependencies:
      next: '>=13.2.0'

  generator-function@2.0.1:
    resolution: {integrity: sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==}
    engines: {node: '>= 0.4'}

  gensync@1.0.0-beta.2:
    resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==}
    engines: {node: '>=6.9.0'}
@@ -4937,8 +4991,8 @@ packages:
    resolution: {integrity: sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==}
    engines: {node: '>= 0.4'}

  get-tsconfig@4.10.1:
    resolution: {integrity: sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==}
  get-tsconfig@4.13.0:
    resolution: {integrity: sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==}

  github-slugger@2.0.0:
    resolution: {integrity: sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==}
@@ -5159,9 +5213,6 @@ packages:
    resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==}
    engines: {node: '>=6'}

  import-in-the-middle@1.14.2:
    resolution: {integrity: sha512-5tCuY9BV8ujfOpwtAGgsTx9CGUapcFMEEyByLv1B+v2+6DhAcw+Zr0nhQT7uwaZ7DiourxFEscghOR8e1aPLQw==}

  import-in-the-middle@2.0.0:
    resolution: {integrity: sha512-yNZhyQYqXpkT0AKq3F3KLasUSK4fHvebNH5hOsKQw2dhGSALvQ4U0BqUc5suziKvydO5u5hgN2hy1RJaho8U5A==}

@@ -5273,6 +5324,10 @@ packages:
    resolution: {integrity: sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==}
    engines: {node: '>= 0.4'}

  is-generator-function@1.1.2:
    resolution: {integrity: sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==}
    engines: {node: '>= 0.4'}

  is-glob@4.0.3:
    resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
    engines: {node: '>=0.10.0'}
@@ -5894,8 +5949,8 @@ packages:
    engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
    hasBin: true

  napi-postinstall@0.3.3:
    resolution: {integrity: sha512-uTp172LLXSxuSYHv/kou+f6KW3SMppU9ivthaVTXian9sOt3XM/zHYHpRZiLgQoxeWfYUnslNWQHF1+G71xcow==}
  napi-postinstall@0.3.4:
    resolution: {integrity: sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==}
    engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0}
    hasBin: true

@@ -5911,8 +5966,8 @@ packages:
      react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
      react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc

  next@15.4.7:
    resolution: {integrity: sha512-OcqRugwF7n7mC8OSYjvsZhhG1AYSvulor1EIUsIkbbEbf1qoE5EbH36Swj8WhF4cHqmDgkiam3z1c1W0J1Wifg==}
  next@15.4.10:
    resolution: {integrity: sha512-itVlc79QjpKMFMRhP+kbGKaSG/gZM6RCvwhEbwmCNF06CdDiNaoHcbeg0PqkEa2GOcn8KJ0nnc7+yL7EjoYLHQ==}
    engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0}
    hasBin: true
    peerDependencies:
@@ -6760,6 +6815,11 @@ packages:
    engines: {node: '>= 0.4'}
    hasBin: true

  resolve@1.22.11:
    resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==}
    engines: {node: '>= 0.4'}
    hasBin: true

  resolve@1.22.8:
    resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==}
    hasBin: true
@@ -7849,7 +7909,7 @@ snapshots:
      '@babel/helper-plugin-utils': 7.27.1
      debug: 4.4.3
      lodash.debounce: 4.0.8
      resolve: 1.22.10
      resolve: 1.22.11
    transitivePeerDependencies:
      - supports-color

@@ -8541,7 +8601,7 @@ snapshots:

  '@date-fns/tz@1.4.1': {}

  '@emnapi/core@1.5.0':
  '@emnapi/core@1.7.1':
    dependencies:
      '@emnapi/wasi-threads': 1.1.0
      tslib: 2.8.1
@@ -8552,6 +8612,11 @@ snapshots:
      tslib: 2.8.1
    optional: true

  '@emnapi/runtime@1.7.1':
    dependencies:
      tslib: 2.8.1
    optional: true

  '@emnapi/wasi-threads@1.1.0':
    dependencies:
      tslib: 2.8.1
@@ -8730,6 +8795,8 @@ snapshots:

  '@eslint-community/regexpp@4.12.1': {}

  '@eslint-community/regexpp@4.12.2': {}

  '@eslint/eslintrc@2.1.4':
    dependencies:
      ajv: 6.12.6
@@ -8987,46 +9054,46 @@ snapshots:

  '@napi-rs/wasm-runtime@0.2.12':
    dependencies:
      '@emnapi/core': 1.5.0
      '@emnapi/runtime': 1.5.0
      '@tybys/wasm-util': 0.10.0
      '@emnapi/core': 1.7.1
      '@emnapi/runtime': 1.7.1
      '@tybys/wasm-util': 0.10.1
    optional: true

  '@neoconfetti/react@1.0.0': {}

  '@next/env@15.4.7': {}
  '@next/env@15.4.10': {}

  '@next/eslint-plugin-next@15.5.2':
  '@next/eslint-plugin-next@15.5.7':
    dependencies:
      fast-glob: 3.3.1

  '@next/swc-darwin-arm64@15.4.7':
  '@next/swc-darwin-arm64@15.4.8':
    optional: true

  '@next/swc-darwin-x64@15.4.7':
  '@next/swc-darwin-x64@15.4.8':
    optional: true

  '@next/swc-linux-arm64-gnu@15.4.7':
  '@next/swc-linux-arm64-gnu@15.4.8':
    optional: true

  '@next/swc-linux-arm64-musl@15.4.7':
  '@next/swc-linux-arm64-musl@15.4.8':
    optional: true

  '@next/swc-linux-x64-gnu@15.4.7':
  '@next/swc-linux-x64-gnu@15.4.8':
    optional: true

  '@next/swc-linux-x64-musl@15.4.7':
  '@next/swc-linux-x64-musl@15.4.8':
    optional: true

  '@next/swc-win32-arm64-msvc@15.4.7':
  '@next/swc-win32-arm64-msvc@15.4.8':
    optional: true

  '@next/swc-win32-x64-msvc@15.4.7':
  '@next/swc-win32-x64-msvc@15.4.8':
    optional: true

  '@next/third-parties@15.4.6(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
  '@next/third-parties@15.4.6(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
    dependencies:
      next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      react: 18.3.1
      third-party-capital: 1.0.20

@@ -10106,7 +10173,7 @@ snapshots:

  '@rtsao/scc@1.1.0': {}

  '@rushstack/eslint-patch@1.12.0': {}
  '@rushstack/eslint-patch@1.15.0': {}

  '@scarf/scarf@1.4.0': {}

@@ -10258,7 +10325,7 @@ snapshots:

  '@sentry/core@10.27.0': {}

  '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))':
  '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))':
    dependencies:
      '@opentelemetry/api': 1.9.0
      '@opentelemetry/semantic-conventions': 1.37.0
@@ -10271,7 +10338,7 @@ snapshots:
      '@sentry/react': 10.27.0(react@18.3.1)
      '@sentry/vercel-edge': 10.27.0
      '@sentry/webpack-plugin': 4.3.0(webpack@5.101.3(esbuild@0.25.9))
      next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      resolve: 1.22.8
      rollup: 4.52.2
      stacktrace-parser: 0.1.11
@@ -10633,7 +10700,7 @@ snapshots:
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)

  '@storybook/nextjs@9.1.5(esbuild@0.25.9)(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))':
  '@storybook/nextjs@9.1.5(esbuild@0.25.9)(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))':
    dependencies:
      '@babel/core': 7.28.4
      '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.28.4)
@@ -10657,7 +10724,7 @@ snapshots:
      css-loader: 6.11.0(webpack@5.101.3(esbuild@0.25.9))
      image-size: 2.0.2
      loader-utils: 3.3.1
      next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      node-polyfill-webpack-plugin: 2.0.1(webpack@5.101.3(esbuild@0.25.9))
      postcss: 8.5.6
      postcss-loader: 8.2.0(postcss@8.5.6)(typescript@5.9.3)(webpack@5.101.3(esbuild@0.25.9))
@@ -10858,7 +10925,7 @@ snapshots:
    dependencies:
      '@testing-library/dom': 10.4.1

  '@tybys/wasm-util@0.10.0':
  '@tybys/wasm-util@0.10.1':
    dependencies:
      tslib: 2.8.1
    optional: true
@@ -11056,14 +11123,14 @@ snapshots:
    dependencies:
      '@types/node': 24.10.0

  '@typescript-eslint/eslint-plugin@8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)':
  '@typescript-eslint/eslint-plugin@8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)':
    dependencies:
      '@eslint-community/regexpp': 4.12.1
      '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/scope-manager': 8.43.0
      '@typescript-eslint/type-utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/visitor-keys': 8.43.0
      '@eslint-community/regexpp': 4.12.2
      '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/scope-manager': 8.48.1
      '@typescript-eslint/type-utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/visitor-keys': 8.48.1
      eslint: 8.57.1
      graphemer: 1.4.0
      ignore: 7.0.5
@@ -11073,12 +11140,12 @@ snapshots:
    transitivePeerDependencies:
      - supports-color

  '@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3)':
  '@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/scope-manager': 8.43.0
      '@typescript-eslint/types': 8.43.0
      '@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3)
      '@typescript-eslint/visitor-keys': 8.43.0
      '@typescript-eslint/scope-manager': 8.48.1
      '@typescript-eslint/types': 8.48.1
      '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3)
      '@typescript-eslint/visitor-keys': 8.48.1
      debug: 4.4.3
      eslint: 8.57.1
      typescript: 5.9.3
@@ -11088,7 +11155,7 @@ snapshots:
  '@typescript-eslint/project-service@8.43.0(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/tsconfig-utils': 8.43.0(typescript@5.9.3)
      '@typescript-eslint/types': 8.43.0
      '@typescript-eslint/types': 8.48.1
      debug: 4.4.3
      typescript: 5.9.3
    transitivePeerDependencies:
@@ -11097,7 +11164,16 @@ snapshots:
  '@typescript-eslint/project-service@8.46.2(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/tsconfig-utils': 8.46.2(typescript@5.9.3)
      '@typescript-eslint/types': 8.46.2
      '@typescript-eslint/types': 8.48.1
      debug: 4.4.3
      typescript: 5.9.3
    transitivePeerDependencies:
      - supports-color

  '@typescript-eslint/project-service@8.48.1(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3)
      '@typescript-eslint/types': 8.48.1
      debug: 4.4.3
      typescript: 5.9.3
    transitivePeerDependencies:
@@ -11113,6 +11189,11 @@ snapshots:
      '@typescript-eslint/types': 8.46.2
      '@typescript-eslint/visitor-keys': 8.46.2

  '@typescript-eslint/scope-manager@8.48.1':
    dependencies:
      '@typescript-eslint/types': 8.48.1
      '@typescript-eslint/visitor-keys': 8.48.1

  '@typescript-eslint/tsconfig-utils@8.43.0(typescript@5.9.3)':
    dependencies:
      typescript: 5.9.3
@@ -11121,11 +11202,15 @@ snapshots:
    dependencies:
      typescript: 5.9.3

  '@typescript-eslint/type-utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)':
  '@typescript-eslint/tsconfig-utils@8.48.1(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/types': 8.43.0
      '@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3)
      '@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
      typescript: 5.9.3

  '@typescript-eslint/type-utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/types': 8.48.1
      '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3)
      '@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
      debug: 4.4.3
      eslint: 8.57.1
      ts-api-utils: 2.1.0(typescript@5.9.3)
@@ -11137,6 +11222,8 @@ snapshots:

  '@typescript-eslint/types@8.46.2': {}

  '@typescript-eslint/types@8.48.1': {}

  '@typescript-eslint/typescript-estree@8.43.0(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/project-service': 8.43.0(typescript@5.9.3)
@@ -11147,7 +11234,7 @@ snapshots:
      fast-glob: 3.3.3
      is-glob: 4.0.3
      minimatch: 9.0.5
      semver: 7.7.2
      semver: 7.7.3
      ts-api-utils: 2.1.0(typescript@5.9.3)
      typescript: 5.9.3
    transitivePeerDependencies:
@@ -11169,6 +11256,21 @@ snapshots:
    transitivePeerDependencies:
      - supports-color

  '@typescript-eslint/typescript-estree@8.48.1(typescript@5.9.3)':
    dependencies:
      '@typescript-eslint/project-service': 8.48.1(typescript@5.9.3)
      '@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3)
      '@typescript-eslint/types': 8.48.1
      '@typescript-eslint/visitor-keys': 8.48.1
      debug: 4.4.3
      minimatch: 9.0.5
      semver: 7.7.3
      tinyglobby: 0.2.15
      ts-api-utils: 2.1.0(typescript@5.9.3)
      typescript: 5.9.3
    transitivePeerDependencies:
      - supports-color

  '@typescript-eslint/utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)':
    dependencies:
      '@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1)
@@ -11191,6 +11293,17 @@ snapshots:
    transitivePeerDependencies:
      - supports-color

  '@typescript-eslint/utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)':
    dependencies:
      '@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1)
      '@typescript-eslint/scope-manager': 8.48.1
      '@typescript-eslint/types': 8.48.1
      '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3)
      eslint: 8.57.1
      typescript: 5.9.3
    transitivePeerDependencies:
      - supports-color

  '@typescript-eslint/visitor-keys@8.43.0':
    dependencies:
      '@typescript-eslint/types': 8.43.0
@@ -11201,6 +11314,11 @@ snapshots:
      '@typescript-eslint/types': 8.46.2
      eslint-visitor-keys: 4.2.1

  '@typescript-eslint/visitor-keys@8.48.1':
    dependencies:
      '@typescript-eslint/types': 8.48.1
      eslint-visitor-keys: 4.2.1

  '@ungap/structured-clone@1.3.0': {}

  '@unrs/resolver-binding-android-arm-eabi@1.11.1':
@@ -11262,14 +11380,14 @@ snapshots:
  '@unrs/resolver-binding-win32-x64-msvc@1.11.1':
    optional: true

  '@vercel/analytics@1.5.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
  '@vercel/analytics@1.5.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
    optionalDependencies:
      next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      react: 18.3.1

  '@vercel/speed-insights@1.2.0(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
  '@vercel/speed-insights@1.2.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
    optionalDependencies:
      next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      react: 18.3.1

  '@vitest/expect@3.2.4':
@@ -12523,16 +12641,16 @@ snapshots:

  escape-string-regexp@5.0.0: {}

  eslint-config-next@15.5.2(eslint@8.57.1)(typescript@5.9.3):
  eslint-config-next@15.5.7(eslint@8.57.1)(typescript@5.9.3):
    dependencies:
      '@next/eslint-plugin-next': 15.5.2
      '@rushstack/eslint-patch': 1.12.0
      '@typescript-eslint/eslint-plugin': 8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
      '@next/eslint-plugin-next': 15.5.7
      '@rushstack/eslint-patch': 1.15.0
      '@typescript-eslint/eslint-plugin': 8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
      eslint: 8.57.1
      eslint-import-resolver-node: 0.3.9
      eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1)
      eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
      eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1)
      eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
      eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1)
      eslint-plugin-react: 7.37.5(eslint@8.57.1)
      eslint-plugin-react-hooks: 5.2.0(eslint@8.57.1)
@@ -12547,37 +12665,37 @@ snapshots:
    dependencies:
      debug: 3.2.7
      is-core-module: 2.16.1
      resolve: 1.22.10
      resolve: 1.22.11
    transitivePeerDependencies:
      - supports-color

  eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1):
  eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1):
    dependencies:
      '@nolyfill/is-core-module': 1.0.39
      debug: 4.4.3
      eslint: 8.57.1
      get-tsconfig: 4.10.1
      get-tsconfig: 4.13.0
      is-bun-module: 2.0.0
      stable-hash: 0.0.5
      tinyglobby: 0.2.15
      unrs-resolver: 1.11.1
    optionalDependencies:
      eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
      eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
    transitivePeerDependencies:
      - supports-color

  eslint-module-utils@2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1):
  eslint-module-utils@2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
    dependencies:
      debug: 3.2.7
    optionalDependencies:
      '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
      eslint: 8.57.1
      eslint-import-resolver-node: 0.3.9
      eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1)
      eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1)
    transitivePeerDependencies:
      - supports-color

  eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1):
  eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
    dependencies:
      '@rtsao/scc': 1.1.0
      array-includes: 3.1.9
@@ -12588,7 +12706,7 @@ snapshots:
      doctrine: 2.1.0
      eslint: 8.57.1
      eslint-import-resolver-node: 0.3.9
      eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
      eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
      hasown: 2.0.2
      is-core-module: 2.16.1
      is-glob: 4.0.3
@@ -12600,7 +12718,7 @@ snapshots:
      string.prototype.trimend: 1.0.9
      tsconfig-paths: 3.15.0
    optionalDependencies:
      '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
      '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
    transitivePeerDependencies:
      - eslint-import-resolver-typescript
      - eslint-import-resolver-webpack
@@ -12864,6 +12982,12 @@ snapshots:
      keyv: 4.5.4
      rimraf: 3.0.2

  flatbush@4.5.0:
    dependencies:
      flatqueue: 3.0.0

  flatqueue@3.0.0: {}

  flatted@3.3.3: {}

  for-each@0.3.5:
@@ -12939,9 +13063,11 @@ snapshots:

  functions-have-names@1.2.3: {}

  geist@1.5.1(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)):
  geist@1.5.1(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)):
    dependencies:
      next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)

  generator-function@2.0.1: {}

  gensync@1.0.0-beta.2: {}

@@ -12975,7 +13101,7 @@ snapshots:
      es-errors: 1.3.0
      get-intrinsic: 1.3.0

  get-tsconfig@4.10.1:
  get-tsconfig@4.13.0:
    dependencies:
      resolve-pkg-maps: 1.0.0

@@ -13259,13 +13385,6 @@ snapshots:
      parent-module: 1.0.1
      resolve-from: 4.0.0

  import-in-the-middle@1.14.2:
    dependencies:
      acorn: 8.15.0
      acorn-import-attributes: 1.9.5(acorn@8.15.0)
      cjs-module-lexer: 1.4.3
      module-details-from-path: 1.0.4

  import-in-the-middle@2.0.0:
    dependencies:
      acorn: 8.15.0
@@ -13342,7 +13461,7 @@ snapshots:

  is-bun-module@2.0.0:
    dependencies:
      semver: 7.7.2
      semver: 7.7.3

  is-callable@1.2.7: {}

@@ -13380,6 +13499,14 @@ snapshots:
      has-tostringtag: 1.0.2
      safe-regex-test: 1.1.0

  is-generator-function@1.1.2:
    dependencies:
      call-bound: 1.0.4
      generator-function: 2.0.1
      get-proto: 1.0.1
      has-tostringtag: 1.0.2
      safe-regex-test: 1.1.0

  is-glob@4.0.3:
    dependencies:
      is-extglob: 2.1.1
@@ -14200,7 +14327,7 @@ snapshots:

  nanoid@3.3.11: {}

  napi-postinstall@0.3.3: {}
  napi-postinstall@0.3.4: {}

  natural-compare@1.4.0: {}

@@ -14211,9 +14338,9 @@ snapshots:
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)

  next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
  next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
    dependencies:
      '@next/env': 15.4.7
      '@next/env': 15.4.10
      '@swc/helpers': 0.5.15
      caniuse-lite: 1.0.30001741
      postcss: 8.4.31
@@ -14221,14 +14348,14 @@ snapshots:
      react-dom: 18.3.1(react@18.3.1)
      styled-jsx: 5.1.6(@babel/core@7.28.4)(react@18.3.1)
    optionalDependencies:
      '@next/swc-darwin-arm64': 15.4.7
      '@next/swc-darwin-x64': 15.4.7
      '@next/swc-linux-arm64-gnu': 15.4.7
      '@next/swc-linux-arm64-musl': 15.4.7
      '@next/swc-linux-x64-gnu': 15.4.7
      '@next/swc-linux-x64-musl': 15.4.7
      '@next/swc-win32-arm64-msvc': 15.4.7
      '@next/swc-win32-x64-msvc': 15.4.7
      '@next/swc-darwin-arm64': 15.4.8
      '@next/swc-darwin-x64': 15.4.8
      '@next/swc-linux-arm64-gnu': 15.4.8
      '@next/swc-linux-arm64-musl': 15.4.8
      '@next/swc-linux-x64-gnu': 15.4.8
      '@next/swc-linux-x64-musl': 15.4.8
      '@next/swc-win32-arm64-msvc': 15.4.8
      '@next/swc-win32-x64-msvc': 15.4.8
      '@opentelemetry/api': 1.9.0
      '@playwright/test': 1.56.1
      sharp: 0.34.3
@@ -14306,12 +14433,12 @@ snapshots:
    dependencies:
      boolbase: 1.0.0

  nuqs@2.7.2(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1):
  nuqs@2.7.2(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1):
    dependencies:
      '@standard-schema/spec': 1.0.0
      react: 18.3.1
    optionalDependencies:
      next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
      next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)

  oas-kit-common@1.0.8:
    dependencies:
@@ -15170,6 +15297,12 @@ snapshots:
      path-parse: 1.0.7
      supports-preserve-symlinks-flag: 1.0.0

  resolve@1.22.11:
    dependencies:
      is-core-module: 2.16.1
      path-parse: 1.0.7
      supports-preserve-symlinks-flag: 1.0.0

  resolve@1.22.8:
    dependencies:
      is-core-module: 2.16.1
@@ -15325,7 +15458,7 @@ snapshots:
    dependencies:
      color: 4.2.3
      detect-libc: 2.0.4
      semver: 7.7.2
      semver: 7.7.3
    optionalDependencies:
      '@img/sharp-darwin-arm64': 0.34.3
      '@img/sharp-darwin-x64': 0.34.3
@@ -15981,7 +16114,7 @@ snapshots:

  unrs-resolver@1.11.1:
    dependencies:
      napi-postinstall: 0.3.3
      napi-postinstall: 0.3.4
    optionalDependencies:
      '@unrs/resolver-binding-android-arm-eabi': 1.11.1
      '@unrs/resolver-binding-android-arm64': 1.11.1
@@ -16209,7 +16342,7 @@ snapshots:
      is-async-function: 2.1.1
      is-date-object: 1.1.0
      is-finalizationregistry: 1.1.1
      is-generator-function: 1.1.0
      is-generator-function: 1.1.2
      is-regex: 1.2.1
      is-weakref: 1.1.1
      isarray: 2.0.5

@@ -1,6 +1,4 @@
 "use client";
-import { StoreAgentDetails } from "@/lib/autogpt-server-api";
-import { useBackendAPI } from "@/lib/autogpt-server-api/context";
 import { isEmptyOrWhitespace } from "@/lib/utils";
 import { useRouter } from "next/navigation";
 import { useEffect, useState } from "react";
@@ -13,15 +11,17 @@ import {
   OnboardingStep,
 } from "../components/OnboardingStep";
 import { OnboardingText } from "../components/OnboardingText";
+import { getV1RecommendedOnboardingAgents } from "@/app/api/__generated__/endpoints/onboarding/onboarding";
+import { resolveResponse } from "@/app/api/helpers";
+import { StoreAgentDetails } from "@/app/api/__generated__/models/storeAgentDetails";
 
 export default function Page() {
   const { state, updateState, completeStep } = useOnboarding(4, "INTEGRATIONS");
   const [agents, setAgents] = useState<StoreAgentDetails[]>([]);
-  const api = useBackendAPI();
   const router = useRouter();
 
   useEffect(() => {
-    api.getOnboardingAgents().then((agents) => {
+    resolveResponse(getV1RecommendedOnboardingAgents()).then((agents) => {
       if (agents.length < 2) {
         completeStep("CONGRATS");
         router.replace("/");
@@ -8,7 +8,6 @@ import {
   CardTitle,
 } from "@/components/__legacy__/ui/card";
 import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
-import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
 import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr";
 import { Play } from "lucide-react";
 import OnboardingButton from "../components/OnboardingButton";
@@ -79,20 +78,13 @@ export default function Page() {
       <CardContent className="flex flex-col gap-4">
         {Object.entries(agent?.input_schema.properties || {}).map(
           ([key, inputSubSchema]) => (
-            <div key={key} className="flex flex-col space-y-2">
-              <label className="flex items-center gap-1 text-sm font-medium">
-                {inputSubSchema.title || key}
-                <InformationTooltip
-                  description={inputSubSchema.description}
-                />
-              </label>
-              <RunAgentInputs
-                schema={inputSubSchema}
-                value={onboarding.state?.agentInput?.[key]}
-                placeholder={inputSubSchema.description}
-                onChange={(value) => handleSetAgentInput(key, value)}
-              />
-            </div>
+            <RunAgentInputs
+              key={key}
+              schema={inputSubSchema}
+              value={onboarding.state?.agentInput?.[key]}
+              placeholder={inputSubSchema.description}
+              onChange={(value) => handleSetAgentInput(key, value)}
+            />
           ),
         )}
         <AgentOnboardingCredentials
@@ -12,6 +12,9 @@ import {
   useGetV2GetAgentByVersion,
   useGetV2GetAgentGraph,
 } from "@/app/api/__generated__/endpoints/store/store";
+import { resolveResponse } from "@/app/api/helpers";
+import { postV2AddMarketplaceAgent } from "@/app/api/__generated__/endpoints/library/library";
+import { GraphID } from "@/lib/autogpt-server-api";
 
 export function useOnboardingRunStep() {
   const onboarding = useOnboarding(undefined, "AGENT_CHOICE");
@@ -77,12 +80,7 @@ export function useOnboardingRunStep() {
 
     setShowInput(true);
     onboarding.setStep(6);
-    onboarding.updateState({
-      completedSteps: [
-        ...(onboarding.state.completedSteps || []),
-        "AGENT_NEW_RUN",
-      ],
-    });
+    onboarding.completeStep("AGENT_NEW_RUN");
   }
 
   function handleSetAgentInput(key: string, value: string) {
@@ -111,21 +109,22 @@ export function useOnboardingRunStep() {
     setRunningAgent(true);
 
     try {
-      const libraryAgent = await api.addMarketplaceAgentToLibrary(
-        storeAgent?.store_listing_version_id || "",
+      const libraryAgent = await resolveResponse(
+        postV2AddMarketplaceAgent({
+          store_listing_version_id: storeAgent?.store_listing_version_id || "",
+          source: "onboarding",
+        }),
       );
 
       const { id: runID } = await api.executeGraph(
-        libraryAgent.graph_id,
+        libraryAgent.graph_id as GraphID,
         libraryAgent.graph_version,
        onboarding.state.agentInput || {},
        inputCredentials,
        "onboarding",
      );
 
-      onboarding.updateState({
-        onboardingAgentExecutionId: runID,
-        agentRuns: (onboarding.state.agentRuns || 0) + 1,
-      });
+      onboarding.updateState({ onboardingAgentExecutionId: runID });
 
       router.push("/onboarding/6-congrats");
     } catch (error) {
@@ -5,6 +5,9 @@ import { useRouter } from "next/navigation";
 import * as party from "party-js";
 import { useEffect, useRef, useState } from "react";
 import { useOnboarding } from "../../../../providers/onboarding/onboarding-provider";
+import { resolveResponse } from "@/app/api/helpers";
+import { getV1OnboardingState } from "@/app/api/__generated__/endpoints/onboarding/onboarding";
+import { postV2AddMarketplaceAgent } from "@/app/api/__generated__/endpoints/library/library";
 
 export default function Page() {
   const { completeStep } = useOnboarding(7, "AGENT_INPUT");
@@ -37,11 +40,15 @@ export default function Page() {
     completeStep("CONGRATS");
 
     try {
-      const onboarding = await api.getUserOnboarding();
+      const onboarding = await resolveResponse(getV1OnboardingState());
       if (onboarding?.selectedStoreListingVersionId) {
         try {
-          const libraryAgent = await api.addMarketplaceAgentToLibrary(
-            onboarding.selectedStoreListingVersionId,
+          const libraryAgent = await resolveResponse(
+            postV2AddMarketplaceAgent({
+              store_listing_version_id:
+                onboarding.selectedStoreListingVersionId,
+              source: "onboarding",
+            }),
           );
           router.replace(`/library/agents/${libraryAgent.id}`);
         } catch (error) {
@@ -1,7 +1,7 @@
 import { cn } from "@/lib/utils";
 import StarRating from "./StarRating";
-import { StoreAgentDetails } from "@/lib/autogpt-server-api";
 import SmartImage from "@/components/__legacy__/SmartImage";
+import { StoreAgentDetails } from "@/app/api/__generated__/models/storeAgentDetails";
 
 type OnboardingAgentCardProps = {
   agent?: StoreAgentDetails;
@@ -1,24 +1,24 @@
 "use client";
 import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
-import { useBackendAPI } from "@/lib/autogpt-server-api/context";
 import { useRouter } from "next/navigation";
 import { useEffect } from "react";
+import { resolveResponse, shouldShowOnboarding } from "@/app/api/helpers";
+import { getV1OnboardingState } from "@/app/api/__generated__/endpoints/onboarding/onboarding";
 
 export default function OnboardingPage() {
   const router = useRouter();
-  const api = useBackendAPI();
 
   useEffect(() => {
     async function redirectToStep() {
       try {
         // Check if onboarding is enabled
-        const isEnabled = await api.isOnboardingEnabled();
+        const isEnabled = await shouldShowOnboarding();
         if (!isEnabled) {
           router.replace("/");
           return;
         }
 
-        const onboarding = await api.getUserOnboarding();
+        const onboarding = await resolveResponse(getV1OnboardingState());
 
         // Handle completed onboarding
         if (onboarding.completedSteps.includes("GET_RESULTS")) {
@@ -66,7 +66,7 @@ export default function OnboardingPage() {
     }
 
     redirectToStep();
-  }, [api, router]);
+  }, [router]);
 
   return <LoadingSpinner size="large" cover />;
 }
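Several files above swap ad-hoc useBackendAPI methods for generated endpoint functions unwrapped by resolveResponse. Its actual implementation lives in @/app/api/helpers and is not shown in this diff; as a rough mental model only (an assumption, not the real source), it resolves the typed payload and throws on a non-2xx envelope:

  // Sketch of the presumed contract of resolveResponse (hypothetical):
  async function resolveResponseSketch<T>(
    request: Promise<{ status: number; data: T }>,
  ): Promise<T> {
    const res = await request;
    if (res.status < 200 || res.status >= 300) {
      // Surfacing the failure lets callers keep plain try/catch flows.
      throw new Error(`Request failed with status ${res.status}`);
    }
    return res.data;
  }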
@@ -1,6 +1,16 @@
 "use client";
 
 import { useState, useEffect } from "react";
+import {
+  LineChart,
+  Line,
+  XAxis,
+  YAxis,
+  CartesianGrid,
+  Tooltip,
+  Legend,
+  ResponsiveContainer,
+} from "recharts";
 import { Button } from "@/components/atoms/Button/Button";
 import { Input } from "@/components/__legacy__/ui/input";
 import { Label } from "@/components/__legacy__/ui/label";
@@ -18,9 +28,12 @@ import { useToast } from "@/components/molecules/Toast/use-toast";
 import {
   usePostV2GenerateExecutionAnalytics,
   useGetV2GetExecutionAnalyticsConfiguration,
+  useGetV2GetExecutionAccuracyTrendsAndAlerts,
 } from "@/app/api/__generated__/endpoints/admin/admin";
 import type { ExecutionAnalyticsRequest } from "@/app/api/__generated__/models/executionAnalyticsRequest";
 import type { ExecutionAnalyticsResponse } from "@/app/api/__generated__/models/executionAnalyticsResponse";
+import type { AccuracyTrendsResponse } from "@/app/api/__generated__/models/accuracyTrendsResponse";
+import type { AccuracyLatestData } from "@/app/api/__generated__/models/accuracyLatestData";
 
 // Use the generated type with minimal adjustment for form handling
 interface FormData extends Omit<ExecutionAnalyticsRequest, "created_after"> {
@@ -33,8 +46,133 @@ export function ExecutionAnalyticsForm() {
   const [results, setResults] = useState<ExecutionAnalyticsResponse | null>(
     null,
   );
+  const [trendsData, setTrendsData] = useState<AccuracyTrendsResponse | null>(
+    null,
+  );
   const { toast } = useToast();
 
+  // State for accuracy trends query parameters
+  const [accuracyParams, setAccuracyParams] = useState<{
+    graph_id: string;
+    user_id?: string;
+    days_back: number;
+    drop_threshold: number;
+    include_historical?: boolean;
+  } | null>(null);
+
+  // Use the generated API client for accuracy trends (GET)
+  const { data: accuracyApiResponse, error: accuracyError } =
+    useGetV2GetExecutionAccuracyTrendsAndAlerts(
+      accuracyParams || {
+        graph_id: "",
+        days_back: 30,
+        drop_threshold: 10.0,
+        include_historical: false,
+      },
+      {
+        query: {
+          enabled: !!accuracyParams?.graph_id,
+        },
+      },
+    );
+
+  // Update local state when data changes and handle success/error
+  useEffect(() => {
+    if (accuracyError) {
+      console.error("Failed to fetch trends:", accuracyError);
+      toast({
+        title: "Trends Error",
+        description:
+          (accuracyError as any)?.message || "Failed to fetch accuracy trends",
+        variant: "destructive",
+      });
+      return;
+    }
+
+    const data = accuracyApiResponse?.data;
+    if (data && "latest_data" in data) {
+      setTrendsData(data);
+
+      // Check for alerts
+      if (data.alert) {
+        toast({
+          title: "🚨 Accuracy Alert Detected",
+          description: `${data.alert.drop_percent.toFixed(1)}% accuracy drop detected for this agent`,
+          variant: "destructive",
+        });
+      }
+    }
+  }, [accuracyApiResponse, accuracyError, toast]);
+
+  // Chart component for accuracy trends
+  function AccuracyChart({ data }: { data: AccuracyLatestData[] }) {
+    const chartData = data.map((item) => ({
+      date: new Date(item.date).toLocaleDateString(),
+      "Daily Score": item.daily_score,
+      "3-Day Avg": item.three_day_avg,
+      "7-Day Avg": item.seven_day_avg,
+      "14-Day Avg": item.fourteen_day_avg,
+    }));
+
+    return (
+      <ResponsiveContainer width="100%" height={400}>
+        <LineChart
+          data={chartData}
+          margin={{ top: 5, right: 30, left: 20, bottom: 5 }}
+        >
+          <CartesianGrid strokeDasharray="3 3" />
+          <XAxis dataKey="date" />
+          <YAxis domain={[0, 100]} />
+          <Tooltip
+            formatter={(value) => [`${Number(value).toFixed(2)}%`, ""]}
+          />
+          <Legend />
+          <Line
+            type="monotone"
+            dataKey="Daily Score"
+            stroke="#3b82f6"
+            strokeWidth={2}
+            dot={{ r: 3 }}
+          />
+          <Line
+            type="monotone"
+            dataKey="3-Day Avg"
+            stroke="#10b981"
+            strokeWidth={2}
+            dot={{ r: 3 }}
+          />
+          <Line
+            type="monotone"
+            dataKey="7-Day Avg"
+            stroke="#f59e0b"
+            strokeWidth={2}
+            dot={{ r: 3 }}
+          />
+          <Line
+            type="monotone"
+            dataKey="14-Day Avg"
+            stroke="#8b5cf6"
+            strokeWidth={2}
+            dot={{ r: 3 }}
+          />
+        </LineChart>
+      </ResponsiveContainer>
+    );
+  }
+
+  // Function to fetch accuracy trends using generated API client
+  const fetchAccuracyTrends = (graphId: string, userId?: string) => {
+    if (!graphId.trim()) return;
+
+    setAccuracyParams({
+      graph_id: graphId.trim(),
+      user_id: userId?.trim() || undefined,
+      days_back: 30,
+      drop_threshold: 10.0,
+      include_historical: showAccuracyChart, // Include historical data when chart is enabled
+    });
+  };
+
   // Fetch configuration from API
   const {
     data: config,
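The chart consumes rolling averages (three_day_avg, seven_day_avg, fourteen_day_avg) that arrive pre-computed from the trends endpoint. For intuition only, a trailing-window average over daily scores could be derived like this (a client-side sketch; the real values come from the API):

  function trailingAverage(scores: number[], window: number): number[] {
    // scores[i] is the daily accuracy for day i; the result averages the
    // last `window` days ending at day i (shorter at the start of the series).
    return scores.map((_, i) => {
      const slice = scores.slice(Math.max(0, i - window + 1), i + 1);
      return slice.reduce((sum, s) => sum + s, 0) / slice.length;
    });
  }

  // e.g. a 3-day window over [80, 90, 70, 60] yields [80, 85, 80, ~73.3]
  const threeDayAvg = trailingAverage([80, 90, 70, 60], 3);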
@@ -50,6 +188,7 @@ export function ExecutionAnalyticsForm() {
       }
       const result = res.data;
       setResults(result);
+
       toast({
         title: "Analytics Generated",
         description: `Processed ${result.processed_executions} executions. ${result.successful_analytics} successful, ${result.failed_analytics} failed, ${result.skipped_executions} skipped.`,
@@ -58,11 +197,21 @@ export function ExecutionAnalyticsForm() {
       },
       onError: (error: any) => {
         console.error("Analytics generation error:", error);
+
+        const errorMessage =
+          error?.message || error?.detail || "An unexpected error occurred";
+        const isOpenAIError = errorMessage.includes(
+          "OpenAI API key not configured",
+        );
+
         toast({
-          title: "Analytics Generation Failed",
-          description:
-            error?.message || error?.detail || "An unexpected error occurred",
-          variant: "destructive",
+          title: isOpenAIError
+            ? "Analytics Generation Skipped"
+            : "Analytics Generation Failed",
+          description: isOpenAIError
+            ? "Analytics generation requires OpenAI configuration, but accuracy trends are still available above."
+            : errorMessage,
+          variant: isOpenAIError ? "default" : "destructive",
         });
       },
     },
@@ -77,6 +226,9 @@ export function ExecutionAnalyticsForm() {
     user_prompt: "", // Will use config default when empty
   });
 
+  // State for accuracy trends chart toggle
+  const [showAccuracyChart, setShowAccuracyChart] = useState(true);
+
   // Update form defaults when config loads
   useEffect(() => {
     if (config?.data && config.status === 200 && !formData.model_name) {
@@ -101,6 +253,11 @@ export function ExecutionAnalyticsForm() {
 
     setResults(null);
 
+    // Fetch accuracy trends if chart is enabled
+    if (showAccuracyChart) {
+      fetchAccuracyTrends(formData.graph_id, formData.user_id || undefined);
+    }
+
     // Prepare the request payload
     const payload: ExecutionAnalyticsRequest = {
       graph_id: formData.graph_id.trim(),
@@ -262,6 +419,18 @@ export function ExecutionAnalyticsForm() {
             </Label>
           </div>
 
+          {/* Show Accuracy Chart Checkbox */}
+          <div className="flex items-center space-x-2">
+            <Checkbox
+              id="show_accuracy_chart"
+              checked={showAccuracyChart}
+              onCheckedChange={(checked) => setShowAccuracyChart(!!checked)}
+            />
+            <Label htmlFor="show_accuracy_chart" className="text-sm">
+              Show accuracy trends chart and historical data visualization
+            </Label>
+          </div>
+
           {/* Custom System Prompt */}
           <div className="space-y-2">
             <Label htmlFor="system_prompt">
@@ -370,6 +539,98 @@ export function ExecutionAnalyticsForm() {
         </div>
       </form>
 
+      {/* Accuracy Trends Display */}
+      {trendsData && (
+        <div className="space-y-4">
+          <h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
+
+          {/* Alert Section */}
+          {trendsData.alert && (
+            <div className="rounded-lg border-l-4 border-red-500 bg-red-50 p-4">
+              <div className="flex items-start">
+                <span className="text-2xl">🚨</span>
+                <div className="ml-3 space-y-2">
+                  <h4 className="text-lg font-semibold text-red-800">
+                    Accuracy Alert Detected
+                  </h4>
+                  <p className="text-red-700">
+                    <strong>
+                      {trendsData.alert.drop_percent.toFixed(1)}% accuracy drop
+                    </strong>{" "}
+                    detected for agent{" "}
+                    <code className="rounded bg-red-100 px-1 text-sm">
+                      {formData.graph_id}
+                    </code>
+                  </p>
+                  <div className="space-y-1 text-sm text-red-600">
+                    <p>
+                      • 3-day average:{" "}
+                      <strong>
+                        {trendsData.alert.three_day_avg.toFixed(2)}%
+                      </strong>
+                    </p>
+                    <p>
+                      • 7-day average:{" "}
+                      <strong>
+                        {trendsData.alert.seven_day_avg.toFixed(2)}%
+                      </strong>
+                    </p>
+                    <p>
+                      • Detected at:{" "}
+                      <strong>
+                        {new Date(
+                          trendsData.alert.detected_at,
+                        ).toLocaleString()}
+                      </strong>
+                    </p>
+                  </div>
+                </div>
+              </div>
+            </div>
+          )}
+
+          {/* Latest Data Summary */}
+          <div className="grid grid-cols-2 gap-4 md:grid-cols-4">
+            <div className="rounded-lg border bg-white p-4 text-center">
+              <div className="text-2xl font-bold text-blue-600">
+                {trendsData.latest_data.daily_score?.toFixed(2) || "N/A"}
+              </div>
+              <div className="text-sm text-gray-600">Daily Score</div>
+            </div>
+            <div className="rounded-lg border bg-white p-4 text-center">
+              <div className="text-2xl font-bold text-green-600">
+                {trendsData.latest_data.three_day_avg?.toFixed(2) || "N/A"}
+              </div>
+              <div className="text-sm text-gray-600">3-Day Avg</div>
+            </div>
+            <div className="rounded-lg border bg-white p-4 text-center">
+              <div className="text-2xl font-bold text-orange-600">
+                {trendsData.latest_data.seven_day_avg?.toFixed(2) || "N/A"}
+              </div>
+              <div className="text-sm text-gray-600">7-Day Avg</div>
+            </div>
+            <div className="rounded-lg border bg-white p-4 text-center">
+              <div className="text-2xl font-bold text-purple-600">
+                {trendsData.latest_data.fourteen_day_avg?.toFixed(2) || "N/A"}
+              </div>
+              <div className="text-sm text-gray-600">14-Day Avg</div>
+            </div>
+          </div>
+
+          {/* Chart Section - only show when toggle is enabled and historical data exists */}
+          {showAccuracyChart && trendsData?.historical_data && (
+            <div className="mt-6">
+              <h4 className="mb-4 text-lg font-semibold">
+                Execution Accuracy Trends Chart
+              </h4>
+              <div className="rounded-lg border bg-white p-6">
+                <AccuracyChart data={trendsData.historical_data} />
+              </div>
+            </div>
+          )}
+        </div>
+      )}
+
       {results && <AnalyticsResultsTable results={results} />}
     </div>
   );
@@ -17,12 +17,13 @@ function ExecutionAnalyticsDashboard() {
       </div>
 
       <div className="rounded-lg border bg-white p-6 shadow-sm">
-        <h2 className="mb-4 text-xl font-semibold">Analytics Generation</h2>
+        <h2 className="mb-4 text-xl font-semibold">
+          Execution Analytics & Accuracy Monitoring
+        </h2>
         <p className="mb-6 text-gray-600">
-          This tool will identify completed executions missing activity
-          summaries or success scores and generate them using AI. Only
-          executions that meet the criteria and are missing these fields will
-          be processed.
+          Generate missing activity summaries and success scores for agent
+          executions. After generation, accuracy trends and alerts will
+          automatically be displayed to help monitor agent health over time.
         </p>
 
         <Suspense
@@ -1,4 +1,4 @@
-import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
+import { OAuthPopupResultMessage } from "@/components/renderers/input-renderer/fields/CredentialField/models/OAuthCredentialModal/useOAuthCredentialModal";
 import { NextResponse } from "next/server";
 
 // This route is intended to be used as the callback for integration OAuth flows,
@@ -9,6 +9,8 @@ import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
 import { useShallow } from "zustand/react/shallow";
 import { useState } from "react";
 import { useSaveGraph } from "@/app/(platform)/build/hooks/useSaveGraph";
+import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
+import { ApiError } from "@/lib/autogpt-server-api/helpers";
 
 export const useRunGraph = () => {
   const { saveGraph, isSaving } = useSaveGraph({
@@ -24,6 +26,13 @@ export const useRunGraph = () => {
   );
   const [openRunInputDialog, setOpenRunInputDialog] = useState(false);
 
+  const setNodeErrorsForBackendId = useNodeStore(
+    useShallow((state) => state.setNodeErrorsForBackendId),
+  );
+  const clearAllNodeErrors = useNodeStore(
+    useShallow((state) => state.clearAllNodeErrors),
+  );
+
   const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] =
     useQueryStates({
       flowID: parseAsString,
@@ -35,19 +44,49 @@ export const useRunGraph = () => {
   usePostV1ExecuteGraphAgent({
     mutation: {
       onSuccess: (response: any) => {
+        clearAllNodeErrors();
         const { id } = response.data as GraphExecutionMeta;
         setQueryStates({
           flowExecutionID: id,
         });
       },
       onError: (error: any) => {
         // Reset running state on error
         setIsGraphRunning(false);
-        toast({
-          title: (error.detail as string) ?? "An unexpected error occurred.",
-          description: "An unexpected error occurred.",
-          variant: "destructive",
-        });
+        if (error instanceof ApiError && error.isGraphValidationError?.()) {
+          const errorData = error.response?.detail;
+
+          if (errorData?.node_errors) {
+            Object.entries(errorData.node_errors).forEach(
+              ([backendId, nodeErrors]) => {
+                setNodeErrorsForBackendId(
+                  backendId,
+                  nodeErrors as { [key: string]: string },
+                );
+              },
+            );
+
+            useNodeStore.getState().nodes.forEach((node) => {
+              const backendId = node.data.metadata?.backend_id || node.id;
+              if (!errorData.node_errors[backendId as string]) {
+                useNodeStore.getState().updateNodeErrors(node.id, {});
+              }
+            });
+          }
+
+          toast({
+            title: errorData?.message || "Graph validation failed",
+            description:
+              "Please fix the validation errors on the highlighted nodes and try again.",
+            variant: "destructive",
+          });
+        } else {
+          toast({
+            title:
+              (error.detail as string) ?? "An unexpected error occurred.",
+            description: "An unexpected error occurred.",
+            variant: "destructive",
+          });
+        }
       },
     },
   });
@@ -77,7 +116,7 @@ export const useRunGraph = () => {
     await executeGraph({
       graphId: flowID ?? "",
       graphVersion: flowVersion || null,
-      data: { inputs: {}, credentials_inputs: {} },
+      data: { inputs: {}, credentials_inputs: {}, source: "builder" },
     });
   }
 };
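The new onError branch assumes a validation payload whose detail carries per-node field errors keyed by backend node ID. A plausible shape, inferred only from how the handler indexes it (illustrative; the authoritative schema lives in the backend):

  // Hypothetical example of `error.response.detail` as consumed above:
  const detail = {
    message: "Graph validation failed",
    node_errors: {
      "backend-node-1": { api_key: "This field is required" },
      "backend-node-2": { url: "Invalid URL" },
    },
  };
  // setNodeErrorsForBackendId(backendId, errors) highlights each offending
  // node, and nodes absent from node_errors get their stale errors cleared.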
@@ -79,7 +79,11 @@ export const useRunInputDialog = ({
     await executeGraph({
       graphId: flowID ?? "",
       graphVersion: flowVersion || null,
-      data: { inputs: inputValues, credentials_inputs: credentialValues },
+      data: {
+        inputs: inputValues,
+        credentials_inputs: credentialValues,
+        source: "builder",
+      },
     });
     // Optimistically set running state immediately for responsive UI
     setIsGraphRunning(true);
@@ -4,7 +4,7 @@ import CustomEdge from "../edges/CustomEdge";
 import { useFlow } from "./useFlow";
 import { useShallow } from "zustand/react/shallow";
 import { useNodeStore } from "../../../stores/nodeStore";
-import { useMemo, useEffect } from "react";
+import { useMemo, useEffect, useCallback } from "react";
 import { CustomNode } from "../nodes/CustomNode/CustomNode";
 import { useCustomEdge } from "../edges/useCustomEdge";
 import { useFlowRealtime } from "./useFlowRealtime";
@@ -21,6 +21,7 @@ import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/grap
 import { GraphModel } from "@/app/api/__generated__/models/graphModel";
 import { okData } from "@/app/api/helpers";
 import { TriggerAgentBanner } from "./components/TriggerAgentBanner";
+import { resolveCollisions } from "./helpers/resolve-collision";
 
 export const Flow = () => {
   const [{ flowID, flowExecutionID }] = useQueryStates({
@@ -40,6 +41,7 @@ export const Flow = () => {
   );
 
   const nodes = useNodeStore(useShallow((state) => state.nodes));
+  const setNodes = useNodeStore(useShallow((state) => state.setNodes));
   const onNodesChange = useNodeStore(
     useShallow((state) => state.onNodesChange),
   );
@@ -48,6 +50,15 @@ export const Flow = () => {
   );
   const nodeTypes = useMemo(() => ({ custom: CustomNode }), []);
   const edgeTypes = useMemo(() => ({ custom: CustomEdge }), []);
+  const onNodeDragStop = useCallback(() => {
+    setNodes(
+      resolveCollisions(nodes, {
+        maxIterations: Infinity,
+        overlapThreshold: 0.5,
+        margin: 15,
+      }),
+    );
+  }, [setNodes, nodes]);
   const { edges, onConnect, onEdgesChange } = useCustomEdge();
 
   // We use this hook to load the graph and convert them into custom nodes and edges.
@@ -84,6 +95,7 @@ export const Flow = () => {
         edges={edges}
         onConnect={onConnect}
         onEdgesChange={onEdgesChange}
+        onNodeDragStop={onNodeDragStop}
         maxZoom={2}
         minZoom={0.1}
         onDragOver={onDragOver}
@@ -0,0 +1,160 @@
+import { CustomNode } from "../../nodes/CustomNode/CustomNode";
+import Flatbush from "flatbush";
+
+export type CollisionAlgorithmOptions = {
+  maxIterations: number;
+  overlapThreshold: number;
+  margin: number;
+};
+
+export type CollisionAlgorithm = (
+  nodes: CustomNode[],
+  options: CollisionAlgorithmOptions,
+) => CustomNode[];
+
+type Box = {
+  minX: number;
+  minY: number;
+  maxX: number;
+  maxY: number;
+  id: string;
+  moved: boolean;
+  x: number;
+  y: number;
+  width: number;
+  height: number;
+  node: CustomNode;
+};
+
+function rebuildFlatbush(boxes: Box[]) {
+  const index = new Flatbush(boxes.length);
+  for (const box of boxes) {
+    index.add(box.minX, box.minY, box.maxX, box.maxY);
+  }
+  index.finish();
+  return index;
+}
+
+export const resolveCollisions: CollisionAlgorithm = (
+  nodes,
+  { maxIterations = 50, overlapThreshold = 0.5, margin = 0 },
+) => {
+  // Create boxes from nodes
+  const boxes: Box[] = new Array(nodes.length);
+
+  for (let i = 0; i < nodes.length; i++) {
+    const node = nodes[i];
+    // Use measured dimensions if available, otherwise use defaults
+    const width = (node.width ?? node.measured?.width ?? 0) + margin * 2;
+    const height = (node.height ?? node.measured?.height ?? 0) + margin * 2;
+
+    console.log("width", width);
+    console.log("height", height);
+    const x = node.position.x - margin;
+    const y = node.position.y - margin;
+
+    const box: Box = {
+      minX: x,
+      minY: y,
+      maxX: x + width,
+      maxY: y + height,
+      id: node.id,
+      moved: false,
+      x,
+      y,
+      width,
+      height,
+      node,
+    };
+
+    boxes[i] = box;
+  }
+
+  let numIterations = 0;
+  let index = rebuildFlatbush(boxes);
+
+  for (let iter = 0; iter <= maxIterations; iter++) {
+    let moved = false;
+
+    // For each box, find potential collisions using spatial search
+    for (let i = 0; i < boxes.length; i++) {
+      const A = boxes[i];
+      // Search for boxes that might overlap with A
+      const candidateIndices = index.search(A.minX, A.minY, A.maxX, A.maxY);
+
+      for (const j of candidateIndices) {
+        const B = boxes[j];
+        // Skip self
+        if (A.id === B.id) continue;
+
+        // Calculate center positions
+        const centerAX = A.x + A.width * 0.5;
+        const centerAY = A.y + A.height * 0.5;
+        const centerBX = B.x + B.width * 0.5;
+        const centerBY = B.y + B.height * 0.5;
+
+        // Calculate distance between centers
+        const dx = centerAX - centerBX;
+        const dy = centerAY - centerBY;
+
+        // Calculate overlap along each axis
+        const px = (A.width + B.width) * 0.5 - Math.abs(dx);
+        const py = (A.height + B.height) * 0.5 - Math.abs(dy);
+
+        // Check if there's significant overlap
+        if (px > overlapThreshold && py > overlapThreshold) {
+          A.moved = B.moved = moved = true;
+
+          // Resolve along the smallest overlap axis
+          if (px < py) {
+            // Move along x-axis
+            const sx = dx > 0 ? 1 : -1;
+            const moveAmount = (px / 2) * sx;
+
+            A.x += moveAmount;
+            A.minX += moveAmount;
+            A.maxX += moveAmount;
+            B.x -= moveAmount;
+            B.minX -= moveAmount;
+            B.maxX -= moveAmount;
+          } else {
+            // Move along y-axis
+            const sy = dy > 0 ? 1 : -1;
+            const moveAmount = (py / 2) * sy;
+
+            A.y += moveAmount;
+            A.minY += moveAmount;
+            A.maxY += moveAmount;
+            B.y -= moveAmount;
+            B.minY -= moveAmount;
+            B.maxY -= moveAmount;
+          }
+        }
+      }
+    }
+
+    numIterations = numIterations + 1;
+
+    // Early exit if no overlaps were found
+    if (!moved) {
+      break;
+    }
+
+    index = rebuildFlatbush(boxes);
+  }
+
+  const newNodes = boxes.map((box) => {
+    if (box.moved) {
+      return {
+        ...box.node,
+        position: {
+          x: box.x + margin,
+          y: box.y + margin,
+        },
+      };
+    }
+    return box.node;
+  });
+
+  return newNodes;
+};
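As an aside, a minimal sketch of exercising the new collision helper in isolation — the two-node fixture below is hypothetical; only resolveCollisions and its options object come from the file above:

  import { resolveCollisions } from "./resolve-collision";
  import { CustomNode } from "../../nodes/CustomNode/CustomNode";

  // Hypothetical fixture: two 100x50 nodes overlapping near the origin.
  const a = { id: "a", position: { x: 0, y: 0 }, width: 100, height: 50 } as unknown as CustomNode;
  const b = { id: "b", position: { x: 10, y: 0 }, width: 100, height: 50 } as unknown as CustomNode;

  const spread = resolveCollisions([a, b], {
    maxIterations: 50, // cap on relaxation passes
    overlapThreshold: 0.5, // ignore sub-pixel overlaps
    margin: 15, // minimum gap enforced around each node
  });
  // Each pass queries the Flatbush index for overlap candidates and pushes
  // colliding pairs apart along the axis of least penetration, so `spread`
  // holds the same nodes with separated positions.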
@@ -1,24 +1,25 @@
 import { useCallback } from "react";
 import { useReactFlow } from "@xyflow/react";
-import { Key, storage } from "@/services/storage/local-storage";
 import { v4 as uuidv4 } from "uuid";
 import { useNodeStore } from "../../../stores/nodeStore";
 import { useEdgeStore } from "../../../stores/edgeStore";
 import { CustomNode } from "../nodes/CustomNode/CustomNode";
 import { CustomEdge } from "../edges/CustomEdge";
+import { useToast } from "@/components/molecules/Toast/use-toast";
 
 interface CopyableData {
   nodes: CustomNode[];
   edges: CustomEdge[];
 }
 
+const CLIPBOARD_PREFIX = "autogpt-flow-data:";
+
 export function useCopyPaste() {
   // Only use useReactFlow for viewport (not managed by stores)
   const { getViewport } = useReactFlow();
+  const { toast } = useToast();
 
   const handleCopyPaste = useCallback(
     (event: KeyboardEvent) => {
       // Prevent copy/paste if any modal is open or if the focus is on an input element
       const activeElement = document.activeElement;
       const isInputField =
         activeElement?.tagName === "INPUT" ||
@@ -28,7 +29,6 @@ export function useCopyPaste() {
       if (isInputField) return;
 
       if (event.ctrlKey || event.metaKey) {
-
         // COPY: Ctrl+C or Cmd+C
         if (event.key === "c" || event.key === "C") {
           const { nodes } = useNodeStore.getState();
           const { edges } = useEdgeStore.getState();
@@ -53,81 +53,102 @@ export function useCopyPaste() {
             edges: selectedEdges,
           };
 
-          storage.set(Key.COPIED_FLOW_DATA, JSON.stringify(copiedData));
+          const clipboardText = `${CLIPBOARD_PREFIX}${JSON.stringify(copiedData)}`;
+          navigator.clipboard
+            .writeText(clipboardText)
+            .then(() => {
+              toast({
+                title: "Copied successfully",
+                description: `${selectedNodes.length} node(s) copied to clipboard`,
+              });
+            })
+            .catch((error) => {
+              console.error("Failed to copy to clipboard:", error);
+            });
         }
 
         // PASTE: Ctrl+V or Cmd+V
         if (event.key === "v" || event.key === "V") {
-          const copiedDataString = storage.get(Key.COPIED_FLOW_DATA);
-          if (copiedDataString) {
-            const copiedData = JSON.parse(copiedDataString) as CopyableData;
-            const oldToNewIdMap: Record<string, string> = {};
+          navigator.clipboard
+            .readText()
+            .then((clipboardText) => {
+              if (!clipboardText.startsWith(CLIPBOARD_PREFIX)) {
+                return; // Not our data, ignore
+              }
 
-            // Get fresh viewport values at paste time to ensure correct positioning
-            const { x, y, zoom } = getViewport();
-            const viewportCenter = {
-              x: (window.innerWidth / 2 - x) / zoom,
-              y: (window.innerHeight / 2 - y) / zoom,
-            };
+              const jsonString = clipboardText.slice(CLIPBOARD_PREFIX.length);
+              const copiedData = JSON.parse(jsonString) as CopyableData;
+              const oldToNewIdMap: Record<string, string> = {};
 
-            let minX = Infinity,
-              minY = Infinity,
-              maxX = -Infinity,
-              maxY = -Infinity;
-            copiedData.nodes.forEach((node) => {
-              minX = Math.min(minX, node.position.x);
-              minY = Math.min(minY, node.position.y);
-              maxX = Math.max(maxX, node.position.x);
-              maxY = Math.max(maxY, node.position.y);
-            });
-
-            const offsetX = viewportCenter.x - (minX + maxX) / 2;
-            const offsetY = viewportCenter.y - (minY + maxY) / 2;
-
-            // Deselect existing nodes first
-            useNodeStore.setState((state) => ({
-              nodes: state.nodes.map((node) => ({ ...node, selected: false })),
-            }));
-
-            // Create and add new nodes with UNIQUE IDs using UUID
-            copiedData.nodes.forEach((node) => {
-              const newNodeId = uuidv4();
-              oldToNewIdMap[node.id] = newNodeId;
-
-              const newNode: CustomNode = {
-                ...node,
-                id: newNodeId,
-                selected: true,
-                position: {
-                  x: node.position.x + offsetX,
-                  y: node.position.y + offsetY,
-                },
+              const { x, y, zoom } = getViewport();
+              const viewportCenter = {
+                x: (window.innerWidth / 2 - x) / zoom,
+                y: (window.innerHeight / 2 - y) / zoom,
               };
 
-              useNodeStore.getState().addNode(newNode);
-            });
-
-            // Add edges with updated source/target IDs
-            const { addEdge } = useEdgeStore.getState();
-            copiedData.edges.forEach((edge) => {
-              const newSourceId = oldToNewIdMap[edge.source] ?? edge.source;
-              const newTargetId = oldToNewIdMap[edge.target] ?? edge.target;
-
-              addEdge({
-                source: newSourceId,
-                target: newTargetId,
-                sourceHandle: edge.sourceHandle ?? "",
-                targetHandle: edge.targetHandle ?? "",
-                data: {
-                  ...edge.data,
-                },
+              let minX = Infinity,
+                minY = Infinity,
+                maxX = -Infinity,
+                maxY = -Infinity;
+              copiedData.nodes.forEach((node) => {
+                minX = Math.min(minX, node.position.x);
+                minY = Math.min(minY, node.position.y);
+                maxX = Math.max(maxX, node.position.x);
+                maxY = Math.max(maxY, node.position.y);
+              });
+
+              const offsetX = viewportCenter.x - (minX + maxX) / 2;
+              const offsetY = viewportCenter.y - (minY + maxY) / 2;
+
+              // Deselect existing nodes first
+              useNodeStore.setState((state) => ({
+                nodes: state.nodes.map((node) => ({
+                  ...node,
+                  selected: false,
+                })),
+              }));
+
+              // Create and add new nodes with UNIQUE IDs using UUID
+              copiedData.nodes.forEach((node) => {
+                const newNodeId = uuidv4();
+                oldToNewIdMap[node.id] = newNodeId;
+
+                const newNode: CustomNode = {
+                  ...node,
+                  id: newNodeId,
+                  selected: true,
+                  position: {
+                    x: node.position.x + offsetX,
+                    y: node.position.y + offsetY,
+                  },
+                };
+
+                useNodeStore.getState().addNode(newNode);
+              });
+
+              // Add edges with updated source/target IDs
+              const { addEdge } = useEdgeStore.getState();
+              copiedData.edges.forEach((edge) => {
+                const newSourceId = oldToNewIdMap[edge.source] ?? edge.source;
+                const newTargetId = oldToNewIdMap[edge.target] ?? edge.target;
+
+                addEdge({
+                  source: newSourceId,
+                  target: newTargetId,
+                  sourceHandle: edge.sourceHandle ?? "",
+                  targetHandle: edge.targetHandle ?? "",
+                  data: {
+                    ...edge.data,
+                  },
+                });
+              });
+            })
+            .catch((error) => {
+              console.error("Failed to read from clipboard:", error);
             });
-          }
         }
       }
     },
-    [getViewport],
+    [getViewport, toast],
   );
 
   return handleCopyPaste;
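The copy path now tags the serialized selection with CLIPBOARD_PREFIX so the paste path can recognize its own payload and ignore foreign clipboard contents. A stripped-down sketch of that round-trip outside React (the FlowData alias below is illustrative, not the hook's actual types):

  type FlowData = { nodes: unknown[]; edges: unknown[] };

  const CLIPBOARD_PREFIX = "autogpt-flow-data:";

  async function copyFlow(data: FlowData): Promise<void> {
    // Tag the JSON so pasted text can later be identified as flow data.
    await navigator.clipboard.writeText(
      `${CLIPBOARD_PREFIX}${JSON.stringify(data)}`,
    );
  }

  async function pasteFlow(): Promise<FlowData | null> {
    const text = await navigator.clipboard.readText();
    if (!text.startsWith(CLIPBOARD_PREFIX)) return null; // not our payload
    return JSON.parse(text.slice(CLIPBOARD_PREFIX.length)) as FlowData;
  }

One upside of this design over the previous localStorage approach is that copy/paste works across browser tabs and, where the OS shares a clipboard, across machines.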
@@ -42,11 +42,12 @@ export const useFlow = () => {
   const setBlockMenuOpen = useControlPanelStore(
     useShallow((state) => state.setBlockMenuOpen),
   );
-  const [{ flowID, flowVersion, flowExecutionID }] = useQueryStates({
-    flowID: parseAsString,
-    flowVersion: parseAsInteger,
-    flowExecutionID: parseAsString,
-  });
+  const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] =
+    useQueryStates({
+      flowID: parseAsString,
+      flowVersion: parseAsInteger,
+      flowExecutionID: parseAsString,
+    });
 
   const { data: executionDetails } = useGetV1GetExecutionDetails(
     flowID || "",
@@ -81,7 +82,7 @@ export const useFlow = () => {
     {
       query: {
         select: (res) => res.data as BlockInfo[],
-        enabled: !!flowID && !!blockIds,
+        enabled: !!flowID && !!blockIds && blockIds.length > 0,
       },
     },
   );
@@ -102,6 +103,9 @@ export const useFlow = () => {
   // load graph schemas
   useEffect(() => {
     if (graph) {
+      setQueryStates({
+        flowVersion: graph.version ?? 1,
+      });
       setGraphSchemas(
         graph.input_schema as Record<string, any> | null,
         graph.credentials_input_schema as Record<string, any> | null,
@@ -37,6 +37,7 @@ export type CustomNodeData = {
   costs: BlockCost[];
   categories: BlockInfoCategoriesItem[];
   metadata?: NodeModelMetadata;
+  errors?: { [key: string]: string };
 };
 
 export type CustomNode = XYNode<CustomNodeData, "custom">;
@@ -71,10 +72,24 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
       ? (data.hardcodedValues.output_schema ?? {})
       : data.outputSchema;
 
+    const hasConfigErrors =
+      data.errors &&
+      Object.values(data.errors).some(
+        (value) => value !== null && value !== undefined && value !== "",
+      );
+
+    const outputData = data.nodeExecutionResult?.output_data;
+    const hasOutputError =
+      typeof outputData === "object" &&
+      outputData !== null &&
+      "error" in outputData;
+
+    const hasErrors = hasConfigErrors || hasOutputError;
+
     // Currently all blockTypes' designs are similar - that's why I am using the same component for all of them.
     // If in future we need some drastic change in some blockTypes' design, we can create separate components for them.
     return (
-      <NodeContainer selected={selected} nodeId={nodeId}>
+      <NodeContainer selected={selected} nodeId={nodeId} hasErrors={hasErrors}>
         <div className="rounded-xlarge bg-white">
           <NodeHeader data={data} nodeId={nodeId} />
           {isWebhook && <WebhookDisclaimer nodeId={nodeId} />}
@@ -91,7 +106,11 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
           />
           <NodeAdvancedToggle nodeId={nodeId} />
           {data.uiType != BlockUIType.OUTPUT && (
-            <OutputHandler outputSchema={outputSchema} nodeId={nodeId} />
+            <OutputHandler
+              uiType={data.uiType}
+              outputSchema={outputSchema}
+              nodeId={nodeId}
+            />
           )}
           <NodeDataRenderer nodeId={nodeId} />
         </div>
Some files were not shown because too many files have changed in this diff.