mirror of
https://github.com/simstudioai/sim.git
synced 2026-01-09 15:07:55 -05:00
feat(tools): added arXiv and wikipedia tools/blocks & docs (#814)
* feat(tools): added arxiv tools * feat(tools): added wikipedia tool * updated docs & remove empty interface * remove empty interface * fixed docs generator * fixed wikipedia * removed hasExpandableContent from tool-input for consistency across all tools, regardless of their params * lint --------- Co-authored-by: waleedlatif <waleedlatif@waleedlatifs-MacBook-Pro.local>
138
apps/docs/content/docs/tools/arxiv.mdx
Normal file
@@ -0,0 +1,138 @@
---
title: ArXiv
description: Search and retrieve academic papers from ArXiv
---

import { BlockInfoCard } from "@/components/ui/block-info-card"

<BlockInfoCard
  type="arxiv"
  color="#E0E0E0"
  icon={true}
  iconSvg={`<svg className="block-icon" id='logomark' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 17.732 24.269'>
    <g id='tiny'>
      <path
        d='M573.549,280.916l2.266,2.738,6.674-7.84c.353-.47.52-.717.353-1.117a1.218,1.218,0,0,0-1.061-.748h0a.953.953,0,0,0-.712.262Z'
        transform='translate(-566.984 -271.548)'
        fill='#bdb9b4'
      />
      <path
        d='M579.525,282.225l-10.606-10.174a1.413,1.413,0,0,0-.834-.5,1.09,1.09,0,0,0-1.027.66c-.167.4-.047.681.319,1.206l8.44,10.242h0l-6.282,7.716a1.336,1.336,0,0,0-.323,1.3,1.114,1.114,0,0,0,1.04.69A.992.992,0,0,0,571,293l8.519-7.92A1.924,1.924,0,0,0,579.525,282.225Z'
        transform='translate(-566.984 -271.548)'
        fill='#b31b1b'
      />
      <path
        d='M584.32,293.912l-8.525-10.275,0,0L573.53,280.9l-1.389,1.254a2.063,2.063,0,0,0,0,2.965l10.812,10.419a.925.925,0,0,0,.742.282,1.039,1.039,0,0,0,.953-.667A1.261,1.261,0,0,0,584.32,293.912Z'
        transform='translate(-566.984 -271.548)'
        fill='#bdb9b4'
      />
    </g>
  </svg>`}
/>

{/* MANUAL-CONTENT-START:intro */}
[ArXiv](https://arxiv.org/) is a free, open-access repository of scientific research papers in fields such as physics, mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering, systems science, and economics. ArXiv provides a vast collection of preprints and published articles, making it a primary resource for researchers and practitioners worldwide.

With ArXiv, you can:

- **Search for academic papers**: Find research by keywords, author names, titles, categories, and more
- **Retrieve paper metadata**: Access abstracts, author lists, publication dates, and other bibliographic information
- **Download full-text PDFs**: Obtain the complete text of most papers for in-depth study
- **Explore author contributions**: View all papers by a specific author
- **Stay up-to-date**: Discover the latest submissions and trending topics in your field

In Sim Studio, the ArXiv integration enables your agents to programmatically search, retrieve, and analyze scientific papers from ArXiv. This allows you to automate literature reviews, build research assistants, or incorporate up-to-date scientific knowledge into your agentic workflows. Use ArXiv as a dynamic data source for research, discovery, and knowledge extraction within your Sim Studio projects.
{/* MANUAL-CONTENT-END */}

## Usage Instructions

Search for academic papers, retrieve metadata, download papers, and access the vast collection of scientific research on ArXiv.

## Tools

### `arxiv_search`

Search for academic papers on ArXiv by keywords, authors, titles, or other fields.

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `query` | string | Yes | The search query to execute |
| `searchField` | string | No | Field to search in: all, ti \(title\), au \(author\), abs \(abstract\), co \(comment\), jr \(journal\), cat \(category\), rn \(report number\) |
| `maxResults` | number | No | Maximum number of results to return \(default: 10, max: 2000\) |
| `sortBy` | string | No | Sort by: relevance, lastUpdatedDate, submittedDate \(default: relevance\) |
| `sortOrder` | string | No | Sort order: ascending, descending \(default: descending\) |

#### Output

| Parameter | Type |
| --------- | ---- |
| `query` | string |
| `papers` | string |
| `totalResults` | string |
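
For orientation, here is a minimal sketch of the export-API request this tool builds; it mirrors the `request.url` logic in `apps/sim/tools/arxiv/search.ts` later in this commit, with an illustrative query:

```typescript
// Sketch of the ArXiv export API call behind `arxiv_search` (query values are illustrative).
const params = new URLSearchParams({
  search_query: 'ti:transformer architectures', // "ti:" restricts the search to titles
  max_results: '5',                             // the tool caps this at 2000
  sortBy: 'submittedDate',
  sortOrder: 'descending',
})
const response = await fetch(`http://export.arxiv.org/api/query?${params.toString()}`)
const xml = await response.text() // Atom XML, parsed by the tool's parseArxivXML helper
```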

### `arxiv_get_paper`

Get detailed information about a specific ArXiv paper by its ID.

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `paperId` | string | Yes | ArXiv paper ID \(e.g., 1706.03762, cs.AI/0001001\) |

#### Output

| Parameter | Type |
| --------- | ---- |
| `paper` | string |

### `arxiv_get_author_papers`

Search for papers by a specific author on ArXiv.

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `authorName` | string | Yes | Author name to search for |
| `maxResults` | number | No | Maximum number of results to return \(default: 10, max: 2000\) |

#### Output

| Parameter | Type |
| --------- | ---- |
| `authorPapers` | string |
| `authorName` | string |
| `totalResults` | string |

## Block Configuration

### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `operation` | string | Yes | Operation |

### Outputs

| Output | Type | Description |
| ------ | ---- | ----------- |
| `papers` | json | papers output from the block |
| `totalResults` | number | totalResults output from the block |
| `paper` | json | paper output from the block |
| `authorPapers` | json | authorPapers output from the block |

## Notes

- Category: `tools`
- Type: `arxiv`
@@ -26,7 +26,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
/>

{/* MANUAL-CONTENT-START:intro */}
-[Exa](https://exa.ai/) is an AI-powered search engine designed specifically for developers and researchers that provides highly relevant and up-to-date information from across the web. It combines advanced semantic search capabilities with AI understanding to deliver more accurate and contextually relevant results than traditional search engines.
+[Exa](https://exa.ai/) is an AI-powered search engine designed specifically for developers and researchers, providing highly relevant and up-to-date information from across the web. It combines advanced semantic search capabilities with AI understanding to deliver more accurate and contextually relevant results than traditional search engines.

With Exa, you can:

@@ -35,14 +35,16 @@ With Exa, you can:
- **Access up-to-date information**: Retrieve current information from across the web
- **Find similar content**: Discover related resources based on content similarity
- **Extract webpage contents**: Retrieve and process the full text of web pages
+- **Answer questions with citations**: Ask questions and receive direct answers with supporting sources
+- **Perform research tasks**: Automate multi-step research workflows to gather, synthesize, and summarize information

-In Sim Studio, the Exa integration allows your agents to search the web for information, retrieve content from specific URLs, and find similar resources - all programmatically through API calls. This enables your agents to access real-time information from the internet, enhancing their ability to provide accurate, current, and relevant responses. The integration is particularly valuable for research tasks, information gathering, content discovery, and answering questions that require up-to-date information from across the web.
+In Sim Studio, the Exa integration allows your agents to search the web for information, retrieve content from specific URLs, find similar resources, answer questions with citations, and conduct research tasks—all programmatically through API calls. This enables your agents to access real-time information from the internet, enhancing their ability to provide accurate, current, and relevant responses. The integration is particularly valuable for research tasks, information gathering, content discovery, and answering questions that require up-to-date information from across the web.
{/* MANUAL-CONTENT-END */}

## Usage Instructions

-Search the web, retrieve content, find similar links, and answer questions using Exa
+Search the web, retrieve content, find similar links, and answer questions using Exa's powerful AI search capabilities.
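
For context, a hedged sketch of a direct Exa search request (endpoint, header, and parameter names assumed from Exa's public API docs; the query is illustrative):

```typescript
// Assumed Exa REST call: POST /search with an x-api-key header.
const res = await fetch('https://api.exa.ai/search', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'x-api-key': process.env.EXA_API_KEY!, // your Exa API key
  },
  body: JSON.stringify({ query: 'latest work on agentic workflows', numResults: 5 }),
})
const { results } = await res.json() // array of { title, url, ... } result objects
```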
@@ -44,8 +44,16 @@ With Firecrawl in Sim Studio, you can:
- **Handle JavaScript-heavy sites**: Process content from modern web applications that rely on JavaScript
- **Filter content**: Focus on specific parts of a page using CSS selectors
- **Process at scale**: Handle high-volume scraping needs with a reliable API
+- **Search the web**: Perform intelligent web searches and retrieve structured results
+- **Crawl entire sites**: Crawl multiple pages from a website and aggregate their content

-The Firecrawl integration allows your agents to access and process web content programmatically without leaving the Sim Studio environment. This enables scenarios like research, content aggregation, data extraction, and information analysis from across the web. Your agents can gather information from websites, extract structured data, and use that information to make decisions or generate insights - all without having to navigate the complexities of raw HTML parsing or browser automation. Simply configure the Firecrawl block with your API key, provide the target URL, and your agents can immediately begin working with web content in a clean, structured format.
+In Sim Studio, the Firecrawl integration enables your agents to access and process web content programmatically as part of their workflows. Supported operations include:
+
+- **Scrape**: Extract structured content (Markdown, HTML, metadata) from a single web page.
+- **Search**: Search the web for information using Firecrawl's intelligent search capabilities.
+- **Crawl**: Crawl multiple pages from a website, returning structured content and metadata for each page.
+
+This allows your agents to gather information from websites, extract structured data, and use that information to make decisions or generate insights—all without having to navigate the complexities of raw HTML parsing or browser automation. Simply configure the Firecrawl block with your API key, select the operation (Scrape, Search, or Crawl), and provide the relevant parameters. Your agents can immediately begin working with web content in a clean, structured format.
{/* MANUAL-CONTENT-END */}
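
As a rough illustration of the Scrape operation, a sketch of a direct call to Firecrawl's v1 REST API; treat the exact request and response shape as an assumption based on Firecrawl's public docs:

```typescript
// Assumed Firecrawl v1 scrape call; the target URL and formats are illustrative.
const res = await fetch('https://api.firecrawl.dev/v1/scrape', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.FIRECRAWL_API_KEY}`, // your Firecrawl API key
  },
  body: JSON.stringify({ url: 'https://example.com', formats: ['markdown'] }),
})
const { data } = await res.json() // assumed shape: { markdown, metadata, ... }
```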
@@ -90,7 +90,7 @@ In Sim Studio, the Google Calendar integration enables your agents to programmat

## Usage Instructions

-Integrate Google Calendar functionality to create, read, update, and list calendar events within your workflow. Automate scheduling, check availability, and manage events using OAuth authentication. Email invitations are sent asynchronously and delivery depends on recipients
+Integrate Google Calendar functionality to create, read, update, and list calendar events within your workflow. Automate scheduling, check availability, and manage events using OAuth authentication. Email invitations are sent asynchronously and delivery depends on recipients' Google Calendar settings.
@@ -46,7 +46,7 @@ In Sim Studio, the DALL-E integration enables your agents to generate images pro

## Usage Instructions

-Create high-quality images using OpenAI
+Create high-quality images using OpenAI's image generation models. Configure resolution, quality, style, and other parameters to get exactly the image you need.
@@ -63,7 +63,7 @@ This integration is particularly valuable for building agents that need to gathe

## Usage Instructions

-Transform web content into clean, readable text using Jina AI
+Transform web content into clean, readable text using Jina AI's advanced extraction capabilities. Extract meaningful content from websites while preserving important information and optionally gathering links.
@@ -2,6 +2,7 @@
  "items": [
    "index",
    "airtable",
+    "arxiv",
    "browser_use",
    "clay",
    "confluence",
@@ -53,6 +54,7 @@
    "wealthbox",
    "webhook",
    "whatsapp",
+    "wikipedia",
    "x",
    "youtube"
  ]
@@ -29,7 +29,17 @@ With Notion, you can:
- **Connect information**: Link between pages and databases to create a knowledge network
- **Access anywhere**: Use Notion across web, desktop, and mobile platforms with automatic syncing

-In Sim Studio, the Notion integration enables your agents to interact directly with your Notion workspace programmatically. This allows for powerful automation scenarios such as knowledge management, content creation, and information retrieval. Your agents can read existing Notion pages to extract information, write to pages to update content, and create new pages from scratch. This integration bridges the gap between your AI workflows and your knowledge base, enabling seamless documentation and information management. By connecting Sim Studio with Notion, you can automate documentation processes, maintain up-to-date information repositories, generate reports, and organize information intelligently - all through your intelligent agents.
+In Sim Studio, the Notion integration enables your agents to interact directly with your Notion workspace programmatically. This allows for powerful automation scenarios such as knowledge management, content creation, and information retrieval. Your agents can:
+
+- **Read Notion pages**: Extract content and metadata from any Notion page.
+- **Read Notion databases**: Retrieve database structure and information.
+- **Write to pages**: Append new content to existing Notion pages.
+- **Create new pages**: Generate new Notion pages under a parent page, with custom titles and content.
+- **Query databases**: Search and filter database entries using advanced filter and sort criteria.
+- **Search workspace**: Search across your entire Notion workspace for pages or databases matching specific queries.
+- **Create new databases**: Programmatically create new databases with custom properties and structure.
+
+This integration bridges the gap between your AI workflows and your knowledge base, enabling seamless documentation and information management. By connecting Sim Studio with Notion, you can automate documentation processes, maintain up-to-date information repositories, generate reports, and organize information intelligently—all through your intelligent agents.
{/* MANUAL-CONTENT-END */}
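
As one concrete illustration of the operations listed above, a minimal sketch of a direct workspace search against the Notion API (the `/v1/search` endpoint and `Notion-Version` header are standard Notion API; the token and query are placeholders):

```typescript
// Assumed direct Notion API call; in Sim Studio the integration handles auth for you.
const res = await fetch('https://api.notion.com/v1/search', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.NOTION_TOKEN}`, // internal integration token
    'Notion-Version': '2022-06-28',
  },
  body: JSON.stringify({
    query: 'meeting notes',
    filter: { property: 'object', value: 'page' }, // restrict results to pages
  }),
})
const { results } = await res.json() // array of matching page objects
```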
@@ -43,7 +43,7 @@ In Sim Studio, the OpenAI integration enables your agents to leverage these powe

## Usage Instructions

-Convert text into numerical vector representations using OpenAI
+Convert text into numerical vector representations using OpenAI's embedding models. Transform text data into embeddings for semantic search, clustering, and other vector-based operations.
@@ -45,7 +45,7 @@ In Sim Studio, the Pinecone integration enables your agents to leverage vector s

## Usage Instructions

-Store, search, and retrieve vector embeddings using Pinecone
+Store, search, and retrieve vector embeddings using Pinecone's specialized vector database. Generate embeddings from text and perform semantic similarity searches with customizable filtering options.
@@ -80,6 +80,27 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
  </svg>`}
/>

+{/* MANUAL-CONTENT-START:intro */}
+[Qdrant](https://qdrant.tech) is an open-source vector database designed for efficient storage, management, and retrieval of high-dimensional vector embeddings. Qdrant enables fast and scalable semantic search, making it ideal for AI applications that require similarity search, recommendation systems, and contextual information retrieval.
+
+With Qdrant, you can:
+
+- **Store vector embeddings**: Efficiently manage and persist high-dimensional vectors at scale
+- **Perform semantic similarity search**: Find the most similar vectors to a query vector in real time
+- **Filter and organize data**: Use advanced filtering to narrow down search results based on metadata or payload
+- **Fetch specific points**: Retrieve vectors and their associated payloads by ID
+- **Scale seamlessly**: Handle large collections and high-throughput workloads
+
+In Sim Studio, the Qdrant integration enables your agents to interact with Qdrant programmatically as part of their workflows. Supported operations include:
+
+- **Upsert**: Insert or update points (vectors and payloads) in a Qdrant collection
+- **Search**: Perform similarity search to find vectors most similar to a given query vector, with optional filtering and result customization
+- **Fetch**: Retrieve specific points from a collection by their IDs, with options to include payloads and vectors
+
+This integration allows your agents to leverage powerful vector search and management capabilities, enabling advanced automation scenarios such as semantic search, recommendation, and contextual retrieval. By connecting Sim Studio with Qdrant, you can build agents that understand context, retrieve relevant information from large datasets, and deliver more intelligent and personalized responses—all without managing complex infrastructure.
+{/* MANUAL-CONTENT-END */}

## Usage Instructions

Store, search, and retrieve vector embeddings using Qdrant. Perform semantic similarity searches and manage your vector collections.
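
To make the Search operation above concrete, a minimal sketch of the underlying Qdrant REST call (the points-search endpoint is standard Qdrant; the host, collection name, and vector are placeholders):

```typescript
// Hypothetical similarity search against a Qdrant collection named "docs".
const res = await fetch('http://localhost:6333/collections/docs/points/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    vector: [0.12, -0.07, 0.33], // the query embedding (placeholder values)
    limit: 5,                    // return the 5 nearest points
    with_payload: true,          // include stored payloads in the results
  }),
})
const { result } = await res.json() // array of { id, score, payload } matches
```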
@@ -26,19 +26,14 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
/>

{/* MANUAL-CONTENT-START:intro */}
-[Reddit](https://www.reddit.com/) is a vast social news aggregation, content rating, and discussion platform where registered users submit content such as text posts, images, and links, which are then voted up or down by other members. Known as "the front page of the internet," Reddit is organized into thousands of communities called subreddits, each focused on a specific topic.
+[Reddit](https://www.reddit.com/) is a social platform where users share and discuss content in topic-based communities called subreddits.

-With Reddit, you can:
+In Sim Studio, you can use the Reddit integration to:

-- **Access diverse content**: Browse thousands of specialized communities covering virtually every topic
-- **Stay informed**: Get real-time updates on trending news, discussions, and viral content
-- **Engage with communities**: Participate in discussions with like-minded individuals
-- **Discover trending topics**: See what's popular across different interest groups
-- **Gather insights**: Collect opinions, feedback, and perspectives from diverse user groups
-- **Monitor public sentiment**: Track reactions and discussions around specific topics or brands
-- **Research niche topics**: Access specialized knowledge in dedicated communities
+- **Get Posts**: Retrieve posts from any subreddit, with options to sort (Hot, New, Top, Rising) and filter Top posts by time (Day, Week, Month, Year, All Time).
+- **Get Comments**: Fetch comments from a specific post, with options to sort and set the number of comments.

-In Sim Studio, the Reddit integration enables your agents to programmatically access and analyze content from Reddit's vast ecosystem. This allows for powerful automation scenarios such as trend monitoring, content aggregation, and sentiment analysis. Your agents can retrieve popular posts from specific subreddits, extract valuable information, and incorporate these insights into their workflows. This integration bridges the gap between social media monitoring and your AI workflows, enabling more informed decision-making based on public discussions and trending topics. By connecting Sim Studio with Reddit, you can create agents that stay on top of relevant conversations, identify emerging trends, gather diverse perspectives, and deliver timely insights - all without requiring manual browsing of countless Reddit threads.
+These operations let your agents access and analyze Reddit content as part of your automated workflows.
{/* MANUAL-CONTENT-END */}
@@ -78,7 +78,7 @@ In Sim Studio, the Serper integration enables your agents to leverage the power

## Usage Instructions

-Access real-time web search results with Serper
+Access real-time web search results with Serper's Google Search API integration. Retrieve structured search data including web pages, news, images, and places with customizable language and region settings.
@@ -49,14 +49,22 @@ With Slack, you can:
- **Automate agent notifications**: Send real-time updates from your Sim Studio agents to any Slack channel
- **Create webhook endpoints**: Configure Slack bots as webhooks to trigger Sim Studio workflows from Slack activities
- **Enhance agent workflows**: Integrate Slack messaging into your agents to deliver results, alerts, and status updates
+- **Create and share Slack canvases**: Programmatically generate collaborative documents (canvases) in Slack channels
+- **Read messages from channels**: Retrieve and process recent messages from any Slack channel for monitoring or workflow triggers

-In Sim Studio, the Slack integration enables your agents to programmatically send messages to any Slack channel or user as part of their workflows. This allows for powerful automation scenarios such as sending notifications, alerts, updates, and reports directly to your team's communication hub. Your agents can deliver timely information, share results from processes they've completed, or alert team members when attention is needed. This integration bridges the gap between your AI workflows and your team's communication, ensuring everyone stays informed without manual intervention. By connecting Sim Studio with Slack, you can create agents that keep your team updated with relevant information at the right time, enhance collaboration by sharing insights automatically, and reduce the need for manual status updates - all while leveraging your existing Slack workspace where your team already communicates.
+In Sim Studio, the Slack integration enables your agents to programmatically interact with Slack in several ways as part of their workflows:
+
+- **Send messages**: Agents can send formatted messages to any Slack channel or user, supporting Slack's mrkdwn syntax for rich formatting.
+- **Create canvases**: Agents can create and share Slack canvases (collaborative documents) directly in channels, enabling richer content sharing and documentation.
+- **Read messages**: Agents can read recent messages from channels, allowing for monitoring, reporting, or triggering further actions based on channel activity.
+
+This allows for powerful automation scenarios such as sending notifications, alerts, updates, and reports directly to your team's communication hub, sharing structured documents, or monitoring conversations for workflow triggers. Your agents can deliver timely information, share results from processes they've completed, create collaborative documents, or alert team members when attention is needed. This integration bridges the gap between your AI workflows and your team's communication, ensuring everyone stays informed without manual intervention. By connecting Sim Studio with Slack, you can create agents that keep your team updated with relevant information at the right time, enhance collaboration by sharing insights automatically, and reduce the need for manual status updates—all while leveraging your existing Slack workspace where your team already communicates.
{/* MANUAL-CONTENT-END */}

## Usage Instructions

-Comprehensive Slack integration with OAuth authentication. Send formatted messages using Slack
+Comprehensive Slack integration with OAuth authentication. Send formatted messages using Slack's mrkdwn syntax.
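
For reference, a hedged sketch of the kind of Web API call the Send Message operation performs (`chat.postMessage` is Slack's standard endpoint; the token and channel here are placeholders):

```typescript
// Hypothetical example: posting a mrkdwn-formatted message with Slack's chat.postMessage.
const res = await fetch('https://slack.com/api/chat.postMessage', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json; charset=utf-8',
    Authorization: `Bearer ${process.env.SLACK_BOT_TOKEN}`, // bot token with chat:write scope
  },
  body: JSON.stringify({
    channel: '#releases',                                // channel name or ID
    text: '*Build finished*: _42 tests passed_ :tada:',  // mrkdwn, not standard Markdown
  }),
})
const data = await res.json() // { ok: true, ts: "..." } on success
```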
@@ -51,19 +51,26 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
/>

{/* MANUAL-CONTENT-START:intro */}
-[Supabase](https://www.supabase.com/) is an open-source Firebase alternative that provides a suite of tools for building modern applications. It offers a PostgreSQL database, authentication, instant APIs, real-time subscriptions, storage, and edge functions, all within a unified platform.
+[Supabase](https://www.supabase.com/) is a powerful open-source backend-as-a-service platform that provides developers with a suite of tools to build, scale, and manage modern applications. Supabase offers a fully managed [PostgreSQL](https://www.postgresql.org/) database, robust authentication, instant RESTful and GraphQL APIs, real-time subscriptions, file storage, and edge functions—all accessible through a unified and developer-friendly interface. Its open-source nature and compatibility with popular frameworks make it a compelling alternative to Firebase, with the added benefit of SQL flexibility and transparency.

-With Supabase, you can:
+**Why Supabase?**
+
+- **Instant APIs:** Every table and view in your database is instantly available via REST and GraphQL endpoints, making it easy to build data-driven applications without writing custom backend code.
+- **Real-time Data:** Supabase enables real-time subscriptions, allowing your apps to react instantly to changes in your database.
+- **Authentication & Authorization:** Built-in user management with support for email, OAuth, SSO, and more, plus row-level security for granular access control.
+- **Storage:** Securely upload, serve, and manage files with built-in storage that integrates seamlessly with your database.
+- **Edge Functions:** Deploy serverless functions close to your users for low-latency custom logic.

-- **Manage relational data**: Work with a powerful PostgreSQL database with full SQL capabilities
-- **Implement authentication**: Add secure user authentication with multiple providers
-- **Create instant APIs**: Generate RESTful APIs automatically based on your database schema
-- **Enable real-time updates**: Subscribe to database changes and build reactive applications
-- **Store files**: Upload, transform, and serve files with storage buckets
-- **Deploy serverless functions**: Run code in response to database changes or HTTP requests
-- **Secure your application**: Implement row-level security and manage permissions
+**Using Supabase in Sim Studio**

-In Sim Studio, the Supabase integration enables your agents to interact with your Supabase projects programmatically. This allows for powerful automation scenarios such as data querying, record creation, user management, and file operations. Your agents can retrieve information from your database, insert new records, update existing data, and leverage Supabase's authentication and storage capabilities as part of their workflows. This integration bridges the gap between your AI workflows and your application's data layer, enabling more sophisticated and data-driven automations. By connecting Sim Studio with Supabase, you can create agents that maintain data consistency across systems, trigger actions based on database changes, perform complex data operations, and build workflows that leverage your application's existing data infrastructure - all without requiring manual intervention or custom code.
+Sim Studio's Supabase integration makes it effortless to connect your agentic workflows to your Supabase projects. With just a few configuration fields—your Project ID, Table name, and Service Role Secret—you can securely interact with your database directly from your Sim Studio blocks. The integration abstracts away the complexity of API calls, letting you focus on building logic and automations.
+
+**Key benefits of using Supabase in Sim Studio:**
+
+- **No-code/low-code database operations:** Query, insert, update, and delete rows in your Supabase tables without writing SQL or backend code.
+- **Flexible querying:** Use [PostgREST filter syntax](https://postgrest.org/en/stable/api.html#operators) to perform advanced queries, including filtering, ordering, and limiting results (a sketch follows at the end of this section).
+- **Seamless integration:** Easily connect Supabase to other tools and services in your workflow, enabling powerful automations such as syncing data, triggering notifications, or enriching records.
+- **Secure and scalable:** All operations use your Supabase Service Role Secret, ensuring secure access to your data with the scalability of a managed cloud platform.
+
+Whether you're building internal tools, automating business processes, or powering production applications, Supabase in Sim Studio provides a fast, reliable, and developer-friendly way to manage your data and backend logic—no infrastructure management required. Simply configure your block, select the operation you need, and let Sim Studio handle the rest.
{/* MANUAL-CONTENT-END */}
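
To illustrate the PostgREST filter syntax mentioned above, a minimal sketch of a direct query against the Supabase REST layer (the `tasks` table and its columns are hypothetical; filters use the `column=operator.value` form):

```typescript
// Hypothetical query: up to 10 open rows from a "tasks" table, newest first.
const projectUrl = 'https://YOUR_PROJECT.supabase.co'
const res = await fetch(
  `${projectUrl}/rest/v1/tasks?select=id,title,status&status=eq.open&order=created_at.desc&limit=10`,
  {
    headers: {
      apikey: process.env.SUPABASE_SERVICE_ROLE_KEY!, // Service Role Secret
      Authorization: `Bearer ${process.env.SUPABASE_SERVICE_ROLE_KEY!}`,
    },
  }
)
const rows = await res.json() // array of matching rows
```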
@@ -58,7 +58,7 @@ In Sim Studio, the Tavily integration enables your agents to search the web and

## Usage Instructions

-Access Tavily
+Access Tavily's AI-powered search engine to find relevant information from across the web. Extract and process content from specific URLs with customizable depth options.
179
apps/docs/content/docs/tools/wikipedia.mdx
Normal file
@@ -0,0 +1,179 @@
---
title: Wikipedia
description: Search and retrieve content from Wikipedia
---

import { BlockInfoCard } from "@/components/ui/block-info-card"

<BlockInfoCard
  type="wikipedia"
  color="#000000"
  icon={true}
  iconSvg={`<svg className="block-icon"
    fill='currentColor'
    version='1.1'
    id='Capa_1'
    xmlns='http://www.w3.org/2000/svg'
    xmlnsXlink='http://www.w3.org/1999/xlink'
    viewBox='0 0 98.05 98.05'
    xmlSpace='preserve'
  >
    <g>
      <path
        d='M98.023,17.465l-19.584-0.056c-0.004,0.711-0.006,1.563-0.017,2.121c1.664,0.039,5.922,0.822,7.257,4.327L66.92,67.155
        c-0.919-2.149-9.643-21.528-10.639-24.02l9.072-18.818c1.873-2.863,5.455-4.709,8.918-4.843l-0.01-1.968L55.42,17.489
        c-0.045,0.499,0.001,1.548-0.068,2.069c5.315,0.144,7.215,1.334,5.941,4.508c-2.102,4.776-6.51,13.824-7.372,15.475
        c-2.696-5.635-4.41-9.972-7.345-16.064c-1.266-2.823,1.529-3.922,4.485-4.004v-1.981l-21.82-0.067
        c0.016,0.93-0.021,1.451-0.021,2.131c3.041,0.046,6.988,0.371,8.562,3.019c2.087,4.063,9.044,20.194,11.149,24.514
        c-2.685,5.153-9.207,17.341-11.544,21.913c-3.348-7.43-15.732-36.689-19.232-44.241c-1.304-3.218,3.732-5.077,6.646-5.213
        l0.019-2.148L0,17.398c0.005,0.646,0.027,1.71,0.029,2.187c4.025-0.037,9.908,6.573,11.588,10.683
        c7.244,16.811,14.719,33.524,21.928,50.349c0.002,0.029,2.256,0.059,2.281,0.008c4.717-9.653,10.229-19.797,15.206-29.56
        L63.588,80.64c0.005,0.004,2.082,0.016,2.093,0.007c7.962-18.196,19.892-46.118,23.794-54.933c1.588-3.767,4.245-6.064,8.543-6.194
        l0.032-1.956L98.023,17.465z'
      />
    </g>
  </svg>`}
/>

{/* MANUAL-CONTENT-START:intro */}
[Wikipedia](https://www.wikipedia.org/) is the world's largest free online encyclopedia, offering millions of articles on a vast range of topics, collaboratively written and maintained by volunteers.

With Wikipedia, you can:

- **Search for articles**: Find relevant Wikipedia pages by searching for keywords or topics
- **Get article summaries**: Retrieve concise summaries of Wikipedia pages for quick reference
- **Access full content**: Obtain the complete content of Wikipedia articles for in-depth information
- **Discover random articles**: Explore new topics by retrieving random Wikipedia pages

In Sim Studio, the Wikipedia integration enables your agents to programmatically access and interact with Wikipedia content as part of their workflows. Agents can search for articles, fetch summaries, retrieve full page content, and discover random articles, empowering your automations with up-to-date, reliable information from the world's largest encyclopedia. This integration is ideal for scenarios such as research, content enrichment, fact-checking, and knowledge discovery, allowing your agents to seamlessly incorporate Wikipedia data into their decision-making and task execution processes.
{/* MANUAL-CONTENT-END */}

## Usage Instructions

Access Wikipedia articles, search for pages, get summaries, retrieve full content, and discover random articles from the world's largest encyclopedia.

## Tools

### `wikipedia_summary`

Get a summary and metadata for a specific Wikipedia page.

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `pageTitle` | string | Yes | Title of the Wikipedia page to get summary for |

#### Output

| Parameter | Type |
| --------- | ---- |
| `summary` | string |
| `title` | string |
| `displaytitle` | string |
| `description` | string |
| `extract` | string |
| `extract_html` | string |
| `thumbnail` | string |
| `originalimage` | string |
| `content_urls` | string |
| `revisions` | string |
| `edit` | string |
| `talk` | string |
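
For orientation, a minimal sketch of the public Wikipedia REST endpoint that plausibly backs this operation, given the output fields above (the page title is illustrative):

```typescript
// Wikipedia's REST summary endpoint; its response fields match this tool's outputs.
const title = encodeURIComponent('Alan Turing') // illustrative page title
const res = await fetch(`https://en.wikipedia.org/api/rest_v1/page/summary/${title}`)
const page = await res.json()
console.log(page.title, page.description) // e.g. "Alan Turing" plus a short description
console.log(page.extract)                 // plain-text summary paragraph
```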

### `wikipedia_search`

Search for Wikipedia pages by title or content.

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `query` | string | Yes | Search query to find Wikipedia pages |
| `searchLimit` | number | No | Maximum number of results to return \(default: 10, max: 50\) |

#### Output

| Parameter | Type |
| --------- | ---- |
| `totalHits` | string |
| `query` | string |
| `searchResults` | string |

### `wikipedia_content`

Get the full HTML content of a Wikipedia page.

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `pageTitle` | string | Yes | Title of the Wikipedia page to get content for |

#### Output

| Parameter | Type |
| --------- | ---- |
| `content` | string |
| `pageid` | string |
| `html` | string |
| `revision` | string |
| `tid` | string |
| `timestamp` | string |
| `content_model` | string |
| `content_format` | string |

### `wikipedia_random`

Get a random Wikipedia page.

#### Input

This operation takes no input parameters.

#### Output

| Parameter | Type |
| --------- | ---- |
| `randomPage` | string |
| `title` | string |
| `displaytitle` | string |
| `description` | string |
| `extract` | string |
| `thumbnail` | string |
| `content_urls` | string |

## Block Configuration

### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `operation` | string | Yes | Operation |

### Outputs

| Output | Type | Description |
| ------ | ---- | ----------- |
| `summary` | json | summary output from the block |
| `searchResults` | json | searchResults output from the block |
| `totalHits` | number | totalHits output from the block |
| `content` | json | content output from the block |
| `randomPage` | json | randomPage output from the block |

## Notes

- Category: `tools`
- Type: `wikipedia`
@@ -1342,8 +1342,7 @@ export function ToolInput({
  const requiresOAuth = !isCustomTool && toolRequiresOAuth(currentToolId)
  const oauthConfig = !isCustomTool ? getToolOAuthConfig(currentToolId) : null

-  // Check if the tool has any expandable content
-  const hasExpandableContent = isCustomTool || displayParams.length > 0 || requiresOAuth
+  // Tools are always expandable so users can access the interface

  return (
    <div
@@ -1378,12 +1377,12 @@ export function ToolInput({
      <div
        className={cn(
          'flex items-center justify-between bg-accent/50 p-2',
-          hasExpandableContent ? 'cursor-pointer' : 'cursor-default'
+          'cursor-pointer'
        )}
        onClick={() => {
          if (isCustomTool) {
            handleEditCustomTool(toolIndex)
-          } else if (hasExpandableContent) {
+          } else {
            toggleToolExpansion(toolIndex)
          }
        }}
@@ -1492,7 +1491,7 @@ export function ToolInput({
        </div>
      </div>

-      {!isCustomTool && hasExpandableContent && tool.isExpanded && (
+      {!isCustomTool && tool.isExpanded && (
        <div className='space-y-3 overflow-visible p-3'>
          {/* Operation dropdown for tools with multiple operations */}
          {(() => {
159
apps/sim/blocks/blocks/arxiv.ts
Normal file
@@ -0,0 +1,159 @@
import { ArxivIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types'
import type { ArxivResponse } from '@/tools/arxiv/types'

export const ArxivBlock: BlockConfig<ArxivResponse> = {
  type: 'arxiv',
  name: 'ArXiv',
  description: 'Search and retrieve academic papers from ArXiv',
  longDescription:
    'Search for academic papers, retrieve metadata, download papers, and access the vast collection of scientific research on ArXiv.',
  docsLink: 'https://docs.simstudio.ai/tools/arxiv',
  category: 'tools',
  bgColor: '#E0E0E0',
  icon: ArxivIcon,
  subBlocks: [
    {
      id: 'operation',
      title: 'Operation',
      type: 'dropdown',
      layout: 'full',
      options: [
        { label: 'Search Papers', id: 'arxiv_search' },
        { label: 'Get Paper Details', id: 'arxiv_get_paper' },
        { label: 'Get Author Papers', id: 'arxiv_get_author_papers' },
      ],
      value: () => 'arxiv_search',
    },
    // Search operation inputs
    {
      id: 'query',
      title: 'Search Query',
      type: 'long-input',
      layout: 'full',
      placeholder: 'Enter search terms (e.g., "machine learning", "quantum physics")...',
      condition: { field: 'operation', value: 'arxiv_search' },
    },
    {
      id: 'searchField',
      title: 'Search Field',
      type: 'dropdown',
      layout: 'full',
      options: [
        { label: 'All Fields', id: 'all' },
        { label: 'Title', id: 'ti' },
        { label: 'Author', id: 'au' },
        { label: 'Abstract', id: 'abs' },
        { label: 'Comment', id: 'co' },
        { label: 'Journal Reference', id: 'jr' },
        { label: 'Category', id: 'cat' },
        { label: 'Report Number', id: 'rn' },
      ],
      value: () => 'all',
      condition: { field: 'operation', value: 'arxiv_search' },
    },
    {
      id: 'maxResults',
      title: 'Max Results',
      type: 'short-input',
      layout: 'full',
      placeholder: '10',
      condition: { field: 'operation', value: 'arxiv_search' },
    },
    {
      id: 'sortBy',
      title: 'Sort By',
      type: 'dropdown',
      layout: 'full',
      options: [
        { label: 'Relevance', id: 'relevance' },
        { label: 'Last Updated Date', id: 'lastUpdatedDate' },
        { label: 'Submitted Date', id: 'submittedDate' },
      ],
      value: () => 'relevance',
      condition: { field: 'operation', value: 'arxiv_search' },
    },
    {
      id: 'sortOrder',
      title: 'Sort Order',
      type: 'dropdown',
      layout: 'full',
      options: [
        { label: 'Descending', id: 'descending' },
        { label: 'Ascending', id: 'ascending' },
      ],
      value: () => 'descending',
      condition: { field: 'operation', value: 'arxiv_search' },
    },
    // Get Paper Details operation inputs
    {
      id: 'paperId',
      title: 'Paper ID',
      type: 'short-input',
      layout: 'full',
      placeholder: 'Enter ArXiv paper ID (e.g., 1706.03762, cs.AI/0001001)',
      condition: { field: 'operation', value: 'arxiv_get_paper' },
    },
    // Get Author Papers operation inputs
    {
      id: 'authorName',
      title: 'Author Name',
      type: 'short-input',
      layout: 'full',
      placeholder: 'Enter author name (e.g., "John Smith")...',
      condition: { field: 'operation', value: 'arxiv_get_author_papers' },
    },
    {
      id: 'maxResults',
      title: 'Max Results',
      type: 'short-input',
      layout: 'full',
      placeholder: '10',
      condition: { field: 'operation', value: 'arxiv_get_author_papers' },
    },
  ],
  tools: {
    access: ['arxiv_search', 'arxiv_get_paper', 'arxiv_get_author_papers'],
    config: {
      tool: (params) => {
        // Convert maxResults to a number for operations that use it
        if (params.maxResults) {
          params.maxResults = Number(params.maxResults)
        }

        switch (params.operation) {
          case 'arxiv_search':
            return 'arxiv_search'
          case 'arxiv_get_paper':
            return 'arxiv_get_paper'
          case 'arxiv_get_author_papers':
            return 'arxiv_get_author_papers'
          default:
            return 'arxiv_search'
        }
      },
    },
  },
  inputs: {
    operation: { type: 'string', required: true },
    // Search operation
    query: { type: 'string', required: false },
    searchField: { type: 'string', required: false },
    maxResults: { type: 'number', required: false },
    sortBy: { type: 'string', required: false },
    sortOrder: { type: 'string', required: false },
    // Get Paper Details operation
    paperId: { type: 'string', required: false },
    // Get Author Papers operation
    authorName: { type: 'string', required: false },
  },
  outputs: {
    // Search output
    papers: 'json',
    totalResults: 'number',
    // Get Paper Details output
    paper: 'json',
    // Get Author Papers output
    authorPapers: 'json',
  },
}
108
apps/sim/blocks/blocks/wikipedia.ts
Normal file
@@ -0,0 +1,108 @@
import { WikipediaIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types'
import type { WikipediaResponse } from '@/tools/wikipedia/types'

export const WikipediaBlock: BlockConfig<WikipediaResponse> = {
  type: 'wikipedia',
  name: 'Wikipedia',
  description: 'Search and retrieve content from Wikipedia',
  longDescription:
    "Access Wikipedia articles, search for pages, get summaries, retrieve full content, and discover random articles from the world's largest encyclopedia.",
  docsLink: 'https://docs.simstudio.ai/tools/wikipedia',
  category: 'tools',
  bgColor: '#000000',
  icon: WikipediaIcon,
  subBlocks: [
    {
      id: 'operation',
      title: 'Operation',
      type: 'dropdown',
      layout: 'full',
      options: [
        { label: 'Get Page Summary', id: 'wikipedia_summary' },
        { label: 'Search Pages', id: 'wikipedia_search' },
        { label: 'Get Page Content', id: 'wikipedia_content' },
        { label: 'Random Page', id: 'wikipedia_random' },
      ],
      value: () => 'wikipedia_summary',
    },
    // Page Summary operation inputs
    {
      id: 'pageTitle',
      title: 'Page Title',
      type: 'long-input',
      layout: 'full',
      placeholder: 'Enter Wikipedia page title (e.g., "Python programming language")...',
      condition: { field: 'operation', value: 'wikipedia_summary' },
    },
    // Search Pages operation inputs
    {
      id: 'query',
      title: 'Search Query',
      type: 'long-input',
      layout: 'full',
      placeholder: 'Enter search terms...',
      condition: { field: 'operation', value: 'wikipedia_search' },
    },
    {
      id: 'searchLimit',
      title: 'Max Results',
      type: 'short-input',
      layout: 'full',
      placeholder: '10',
      condition: { field: 'operation', value: 'wikipedia_search' },
    },
    // Get Page Content operation inputs
    {
      id: 'pageTitle',
      title: 'Page Title',
      type: 'long-input',
      layout: 'full',
      placeholder: 'Enter Wikipedia page title...',
      condition: { field: 'operation', value: 'wikipedia_content' },
    },
  ],
  tools: {
    access: ['wikipedia_summary', 'wikipedia_search', 'wikipedia_content', 'wikipedia_random'],
    config: {
      tool: (params) => {
        // Convert searchLimit to a number for search operation
        if (params.searchLimit) {
          params.searchLimit = Number(params.searchLimit)
        }

        switch (params.operation) {
          case 'wikipedia_summary':
            return 'wikipedia_summary'
          case 'wikipedia_search':
            return 'wikipedia_search'
          case 'wikipedia_content':
            return 'wikipedia_content'
          case 'wikipedia_random':
            return 'wikipedia_random'
          default:
            return 'wikipedia_summary'
        }
      },
    },
  },
  inputs: {
    operation: { type: 'string', required: true },
    // Page Summary & Content operations
    pageTitle: { type: 'string', required: false },
    // Search operation
    query: { type: 'string', required: false },
    searchLimit: { type: 'number', required: false },
  },
  outputs: {
    // Page Summary output
    summary: 'json',
    // Search output
    searchResults: 'json',
    totalHits: 'number',
    // Page Content output
    content: 'json',
    // Random Page output
    randomPage: 'json',
  },
}
@@ -6,6 +6,7 @@
import { AgentBlock } from '@/blocks/blocks/agent'
import { AirtableBlock } from '@/blocks/blocks/airtable'
import { ApiBlock } from '@/blocks/blocks/api'
+import { ArxivBlock } from '@/blocks/blocks/arxiv'
import { BrowserUseBlock } from '@/blocks/blocks/browser_use'
import { ClayBlock } from '@/blocks/blocks/clay'
import { ConditionBlock } from '@/blocks/blocks/condition'
@@ -63,6 +64,7 @@ import { VisionBlock } from '@/blocks/blocks/vision'
import { WealthboxBlock } from '@/blocks/blocks/wealthbox'
import { WebhookBlock } from '@/blocks/blocks/webhook'
import { WhatsAppBlock } from '@/blocks/blocks/whatsapp'
+import { WikipediaBlock } from '@/blocks/blocks/wikipedia'
import { WorkflowBlock } from '@/blocks/blocks/workflow'
import { XBlock } from '@/blocks/blocks/x'
import { YouTubeBlock } from '@/blocks/blocks/youtube'
@@ -73,6 +75,7 @@ export const registry: Record<string, BlockConfig> = {
  agent: AgentBlock,
  airtable: AirtableBlock,
  api: ApiBlock,
+  arxiv: ArxivBlock,
  browser_use: BrowserUseBlock,
  clay: ClayBlock,
  condition: ConditionBlock,
@@ -130,6 +133,7 @@ export const registry: Record<string, BlockConfig> = {
  wealthbox: WealthboxBlock,
  webhook: WebhookBlock,
  whatsapp: WhatsAppBlock,
+  wikipedia: WikipediaBlock,
  workflow: WorkflowBlock,
  x: XBlock,
  youtube: YouTubeBlock,
@@ -3107,3 +3107,59 @@ export function QdrantIcon(props: SVGProps<SVGSVGElement>) {
    </svg>
  )
}
+
+export function ArxivIcon(props: SVGProps<SVGSVGElement>) {
+  return (
+    <svg {...props} id='logomark' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 17.732 24.269'>
+      <g id='tiny'>
+        <path
+          d='M573.549,280.916l2.266,2.738,6.674-7.84c.353-.47.52-.717.353-1.117a1.218,1.218,0,0,0-1.061-.748h0a.953.953,0,0,0-.712.262Z'
+          transform='translate(-566.984 -271.548)'
+          fill='#bdb9b4'
+        />
+        <path
+          d='M579.525,282.225l-10.606-10.174a1.413,1.413,0,0,0-.834-.5,1.09,1.09,0,0,0-1.027.66c-.167.4-.047.681.319,1.206l8.44,10.242h0l-6.282,7.716a1.336,1.336,0,0,0-.323,1.3,1.114,1.114,0,0,0,1.04.69A.992.992,0,0,0,571,293l8.519-7.92A1.924,1.924,0,0,0,579.525,282.225Z'
+          transform='translate(-566.984 -271.548)'
+          fill='#b31b1b'
+        />
+        <path
+          d='M584.32,293.912l-8.525-10.275,0,0L573.53,280.9l-1.389,1.254a2.063,2.063,0,0,0,0,2.965l10.812,10.419a.925.925,0,0,0,.742.282,1.039,1.039,0,0,0,.953-.667A1.261,1.261,0,0,0,584.32,293.912Z'
+          transform='translate(-566.984 -271.548)'
+          fill='#bdb9b4'
+        />
+      </g>
+    </svg>
+  )
+}
+
+export function WikipediaIcon(props: SVGProps<SVGSVGElement>) {
+  return (
+    <svg
+      {...props}
+      fill='currentColor'
+      version='1.1'
+      id='Capa_1'
+      xmlns='http://www.w3.org/2000/svg'
+      xmlnsXlink='http://www.w3.org/1999/xlink'
+      width='800px'
+      height='800px'
+      viewBox='0 0 98.05 98.05'
+      xmlSpace='preserve'
+    >
+      <g>
+        <path
+          d='M98.023,17.465l-19.584-0.056c-0.004,0.711-0.006,1.563-0.017,2.121c1.664,0.039,5.922,0.822,7.257,4.327L66.92,67.155
+          c-0.919-2.149-9.643-21.528-10.639-24.02l9.072-18.818c1.873-2.863,5.455-4.709,8.918-4.843l-0.01-1.968L55.42,17.489
+          c-0.045,0.499,0.001,1.548-0.068,2.069c5.315,0.144,7.215,1.334,5.941,4.508c-2.102,4.776-6.51,13.824-7.372,15.475
+          c-2.696-5.635-4.41-9.972-7.345-16.064c-1.266-2.823,1.529-3.922,4.485-4.004v-1.981l-21.82-0.067
+          c0.016,0.93-0.021,1.451-0.021,2.131c3.041,0.046,6.988,0.371,8.562,3.019c2.087,4.063,9.044,20.194,11.149,24.514
+          c-2.685,5.153-9.207,17.341-11.544,21.913c-3.348-7.43-15.732-36.689-19.232-44.241c-1.304-3.218,3.732-5.077,6.646-5.213
+          l0.019-2.148L0,17.398c0.005,0.646,0.027,1.71,0.029,2.187c4.025-0.037,9.908,6.573,11.588,10.683
+          c7.244,16.811,14.719,33.524,21.928,50.349c0.002,0.029,2.256,0.059,2.281,0.008c4.717-9.653,10.229-19.797,15.206-29.56
+          L63.588,80.64c0.005,0.004,2.082,0.016,2.093,0.007c7.962-18.196,19.892-46.118,23.794-54.933c1.588-3.767,4.245-6.064,8.543-6.194
+          l0.032-1.956L98.023,17.465z'
+        />
+      </g>
+    </svg>
+  )
+}
76
apps/sim/tools/arxiv/get_author_papers.ts
Normal file
@@ -0,0 +1,76 @@
import type { ArxivGetAuthorPapersParams, ArxivGetAuthorPapersResponse } from '@/tools/arxiv/types'
import { extractTotalResults, parseArxivXML } from '@/tools/arxiv/utils'
import type { ToolConfig } from '@/tools/types'

export const getAuthorPapersTool: ToolConfig<
  ArxivGetAuthorPapersParams,
  ArxivGetAuthorPapersResponse
> = {
  id: 'arxiv_get_author_papers',
  name: 'ArXiv Get Author Papers',
  description: 'Search for papers by a specific author on ArXiv.',
  version: '1.0.0',

  params: {
    authorName: {
      type: 'string',
      required: true,
      visibility: 'user-or-llm',
      description: 'Author name to search for',
    },
    maxResults: {
      type: 'number',
      required: false,
      visibility: 'user-only',
      description: 'Maximum number of results to return (default: 10, max: 2000)',
    },
  },

  request: {
    url: (params: ArxivGetAuthorPapersParams) => {
      const baseUrl = 'http://export.arxiv.org/api/query'
      const searchParams = new URLSearchParams()

      searchParams.append('search_query', `au:"${params.authorName}"`)
      searchParams.append(
        'max_results',
        (params.maxResults ? Math.min(params.maxResults, 2000) : 10).toString()
      )
      searchParams.append('sortBy', 'submittedDate')
      searchParams.append('sortOrder', 'descending')

      return `${baseUrl}?${searchParams.toString()}`
    },
    method: 'GET',
    headers: () => ({
      'Content-Type': 'application/xml',
    }),
  },

  transformResponse: async (response: Response) => {
    if (!response.ok) {
      throw new Error(`ArXiv API error: ${response.status} ${response.statusText}`)
    }

    const xmlText = await response.text()

    // Parse XML response
    const papers = parseArxivXML(xmlText)
    const totalResults = extractTotalResults(xmlText)

    return {
      success: true,
      output: {
        authorPapers: papers,
        totalResults,
        authorName: '', // Will be filled by the calling code
      },
    }
  },

  transformError: (error) => {
    return error instanceof Error
      ? error.message
      : 'An error occurred while searching for author papers on ArXiv'
  },
}
67
apps/sim/tools/arxiv/get_paper.ts
Normal file
@@ -0,0 +1,67 @@
import type { ArxivGetPaperParams, ArxivGetPaperResponse } from '@/tools/arxiv/types'
import { parseArxivXML } from '@/tools/arxiv/utils'
import type { ToolConfig } from '@/tools/types'

export const getPaperTool: ToolConfig<ArxivGetPaperParams, ArxivGetPaperResponse> = {
  id: 'arxiv_get_paper',
  name: 'ArXiv Get Paper',
  description: 'Get detailed information about a specific ArXiv paper by its ID.',
  version: '1.0.0',

  params: {
    paperId: {
      type: 'string',
      required: true,
      visibility: 'user-or-llm',
      description: 'ArXiv paper ID (e.g., "1706.03762", "cs.AI/0001001")',
    },
  },

  request: {
    url: (params: ArxivGetPaperParams) => {
      // Clean paper ID - remove arxiv.org URLs if present
      let paperId = params.paperId
      if (paperId.includes('arxiv.org/abs/')) {
        paperId = paperId.split('arxiv.org/abs/')[1]
      }

      const baseUrl = 'http://export.arxiv.org/api/query'
      const searchParams = new URLSearchParams()
      searchParams.append('id_list', paperId)

      return `${baseUrl}?${searchParams.toString()}`
    },
    method: 'GET',
    headers: () => ({
      'Content-Type': 'application/xml',
    }),
  },

  transformResponse: async (response: Response) => {
    if (!response.ok) {
      throw new Error(`ArXiv API error: ${response.status} ${response.statusText}`)
    }

    const xmlText = await response.text()

    // Parse XML response
    const papers = parseArxivXML(xmlText)

    if (papers.length === 0) {
      throw new Error('Paper not found')
    }

    return {
      success: true,
      output: {
        paper: papers[0],
      },
    }
  },

  transformError: (error) => {
    return error instanceof Error
      ? error.message
      : 'An error occurred while retrieving the ArXiv paper'
  },
}
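A quick sketch of the ID normalization above: a full abstract URL and a bare ID should produce the same request (values illustrative):

```ts
// Both forms yield the same id_list query, since the builder strips the URL prefix.
getPaperTool.request.url({ paperId: 'https://arxiv.org/abs/1706.03762' })
getPaperTool.request.url({ paperId: '1706.03762' })
// => http://export.arxiv.org/api/query?id_list=1706.03762
```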

7  apps/sim/tools/arxiv/index.ts  Normal file
@@ -0,0 +1,7 @@
import { getAuthorPapersTool } from '@/tools/arxiv/get_author_papers'
import { getPaperTool } from '@/tools/arxiv/get_paper'
import { searchTool } from '@/tools/arxiv/search'

export const arxivSearchTool = searchTool
export const arxivGetPaperTool = getPaperTool
export const arxivGetAuthorPapersTool = getAuthorPapersTool

104  apps/sim/tools/arxiv/search.ts  Normal file
@@ -0,0 +1,104 @@
import type { ArxivSearchParams, ArxivSearchResponse } from '@/tools/arxiv/types'
import { extractTotalResults, parseArxivXML } from '@/tools/arxiv/utils'
import type { ToolConfig } from '@/tools/types'

export const searchTool: ToolConfig<ArxivSearchParams, ArxivSearchResponse> = {
  id: 'arxiv_search',
  name: 'ArXiv Search',
  description: 'Search for academic papers on ArXiv by keywords, authors, titles, or other fields.',
  version: '1.0.0',

  params: {
    query: {
      type: 'string',
      required: true,
      visibility: 'user-or-llm',
      description: 'The search query to execute',
    },
    searchField: {
      type: 'string',
      required: false,
      visibility: 'user-only',
      description:
        'Field to search in: all, ti (title), au (author), abs (abstract), co (comment), jr (journal), cat (category), rn (report number)',
    },
    maxResults: {
      type: 'number',
      required: false,
      visibility: 'user-only',
      description: 'Maximum number of results to return (default: 10, max: 2000)',
    },
    sortBy: {
      type: 'string',
      required: false,
      visibility: 'user-only',
      description: 'Sort by: relevance, lastUpdatedDate, submittedDate (default: relevance)',
    },
    sortOrder: {
      type: 'string',
      required: false,
      visibility: 'user-only',
      description: 'Sort order: ascending, descending (default: descending)',
    },
  },

  request: {
    url: (params: ArxivSearchParams) => {
      const baseUrl = 'http://export.arxiv.org/api/query'
      const searchParams = new URLSearchParams()

      // Build search query
      let searchQuery = params.query
      if (params.searchField && params.searchField !== 'all') {
        searchQuery = `${params.searchField}:${params.query}`
      }
      searchParams.append('search_query', searchQuery)

      // Add optional parameters
      if (params.maxResults) {
        searchParams.append('max_results', Math.min(params.maxResults, 2000).toString())
      } else {
        searchParams.append('max_results', '10')
      }

      if (params.sortBy) {
        searchParams.append('sortBy', params.sortBy)
      }

      if (params.sortOrder) {
        searchParams.append('sortOrder', params.sortOrder)
      }

      return `${baseUrl}?${searchParams.toString()}`
    },
    method: 'GET',
    headers: () => ({
      'Content-Type': 'application/xml',
    }),
  },

  transformResponse: async (response: Response) => {
    if (!response.ok) {
      throw new Error(`ArXiv API error: ${response.status} ${response.statusText}`)
    }

    const xmlText = await response.text()

    // Parse XML response
    const papers = parseArxivXML(xmlText)
    const totalResults = extractTotalResults(xmlText)

    return {
      success: true,
      output: {
        papers,
        totalResults,
        query: '', // Will be filled by the calling code
      },
    }
  },

  transformError: (error) => {
    return error instanceof Error ? error.message : 'An error occurred while searching ArXiv'
  },
}
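A sketch of how `searchField` composes with `query` in the builder above (values illustrative; omitted options are simply not appended):

```ts
// Fielded search: 'ti:transformer' restricts matching to titles.
searchTool.request.url({ query: 'transformer', searchField: 'ti', maxResults: 3, sortBy: 'submittedDate' })
// => http://export.arxiv.org/api/query?search_query=ti%3Atransformer&max_results=3&sortBy=submittedDate
```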

65  apps/sim/tools/arxiv/types.ts  Normal file
@@ -0,0 +1,65 @@
// Common types for ArXiv tools
import type { ToolResponse } from '@/tools/types'

// Search tool types
export interface ArxivSearchParams {
  query: string
  searchField?: 'all' | 'ti' | 'au' | 'abs' | 'co' | 'jr' | 'cat' | 'rn'
  maxResults?: number
  sortBy?: 'relevance' | 'lastUpdatedDate' | 'submittedDate'
  sortOrder?: 'ascending' | 'descending'
}

export interface ArxivPaper {
  id: string
  title: string
  summary: string
  authors: string[]
  published: string
  updated: string
  link: string
  pdfLink: string
  categories: string[]
  primaryCategory: string
  comment?: string
  journalRef?: string
  doi?: string
}

export interface ArxivSearchResponse extends ToolResponse {
  output: {
    papers: ArxivPaper[]
    totalResults: number
    query: string
  }
}

// Get Paper Details tool types
export interface ArxivGetPaperParams {
  paperId: string
}

export interface ArxivGetPaperResponse extends ToolResponse {
  output: {
    paper: ArxivPaper
  }
}

// Get Author Papers tool types
export interface ArxivGetAuthorPapersParams {
  authorName: string
  maxResults?: number
}

export interface ArxivGetAuthorPapersResponse extends ToolResponse {
  output: {
    authorPapers: ArxivPaper[]
    totalResults: number
    authorName: string
  }
}

export type ArxivResponse =
  | ArxivSearchResponse
  | ArxivGetPaperResponse
  | ArxivGetAuthorPapersResponse
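As a concrete reading of the `ArxivPaper` contract, a hand-filled example object (values illustrative, not fetched from the API):

```ts
const examplePaper: ArxivPaper = {
  id: '1706.03762v7',
  title: 'Attention Is All You Need',
  summary: 'The dominant sequence transduction models...',
  authors: ['Ashish Vaswani', 'Noam Shazeer'],
  published: '2017-06-12T17:57:34Z',
  updated: '2023-08-02T00:41:18Z',
  link: 'http://arxiv.org/abs/1706.03762v7',
  pdfLink: 'http://arxiv.org/pdf/1706.03762v7',
  categories: ['cs.CL', 'cs.LG'],
  primaryCategory: 'cs.CL',
  // comment, journalRef, and doi are optional and omitted here
}
```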

90  apps/sim/tools/arxiv/utils.ts  Normal file
@@ -0,0 +1,90 @@
import type { ArxivPaper } from '@/tools/arxiv/types'

export function parseArxivXML(xmlText: string): ArxivPaper[] {
  const papers: ArxivPaper[] = []

  // Extract entries using regex (since we don't have an XML parser in this environment)
  const entryRegex = /<entry>([\s\S]*?)<\/entry>/g
  let match

  while ((match = entryRegex.exec(xmlText)) !== null) {
    const entryXml = match[1]

    const paper: ArxivPaper = {
      id: extractXmlValue(entryXml, 'id')?.replace('http://arxiv.org/abs/', '') || '',
      title: cleanText(extractXmlValue(entryXml, 'title') || ''),
      summary: cleanText(extractXmlValue(entryXml, 'summary') || ''),
      authors: extractAuthors(entryXml),
      published: extractXmlValue(entryXml, 'published') || '',
      updated: extractXmlValue(entryXml, 'updated') || '',
      link: extractXmlValue(entryXml, 'id') || '',
      pdfLink: extractPdfLink(entryXml),
      categories: extractCategories(entryXml),
      primaryCategory: extractXmlAttribute(entryXml, 'arxiv:primary_category', 'term') || '',
      comment: extractXmlValue(entryXml, 'arxiv:comment'),
      journalRef: extractXmlValue(entryXml, 'arxiv:journal_ref'),
      doi: extractXmlValue(entryXml, 'arxiv:doi'),
    }

    papers.push(paper)
  }

  return papers
}

export function extractTotalResults(xmlText: string): number {
  const totalResultsMatch = xmlText.match(
    /<opensearch:totalResults[^>]*>(\d+)<\/opensearch:totalResults>/
  )
  return totalResultsMatch ? Number.parseInt(totalResultsMatch[1], 10) : 0
}

export function extractXmlValue(xml: string, tagName: string): string | undefined {
  const regex = new RegExp(`<${tagName}[^>]*>([\\s\\S]*?)<\/${tagName}>`)
  const match = xml.match(regex)
  return match ? match[1].trim() : undefined
}

export function extractXmlAttribute(
  xml: string,
  tagName: string,
  attrName: string
): string | undefined {
  const regex = new RegExp(`<${tagName}[^>]*${attrName}="([^"]*)"[^>]*>`)
  const match = xml.match(regex)
  return match ? match[1] : undefined
}

export function extractAuthors(entryXml: string): string[] {
  const authors: string[] = []
  const authorRegex = /<author[^>]*>[\s\S]*?<name>([^<]+)<\/name>[\s\S]*?<\/author>/g
  let match

  while ((match = authorRegex.exec(entryXml)) !== null) {
    authors.push(match[1].trim())
  }

  return authors
}

export function extractPdfLink(entryXml: string): string {
  const linkRegex = /<link[^>]*href="([^"]*)"[^>]*title="pdf"[^>]*>/
  const match = entryXml.match(linkRegex)
  return match ? match[1] : ''
}

export function extractCategories(entryXml: string): string[] {
  const categories: string[] = []
  const categoryRegex = /<category[^>]*term="([^"]*)"[^>]*>/g
  let match

  while ((match = categoryRegex.exec(entryXml)) !== null) {
    categories.push(match[1])
  }

  return categories
}

export function cleanText(text: string): string {
  return text.replace(/\s+/g, ' ').trim()
}
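To make the regex-based parsing concrete, here is a hand-written Atom fragment run through `parseArxivXML` (the entry is illustrative, not a captured API response):

```ts
const sampleXml = `<entry>
  <id>http://arxiv.org/abs/1706.03762v7</id>
  <title>Attention Is All You Need</title>
  <summary>  The dominant sequence
  transduction models...  </summary>
  <author><name>Ashish Vaswani</name></author>
  <published>2017-06-12T17:57:34Z</published>
  <updated>2023-08-02T00:41:18Z</updated>
  <link href="http://arxiv.org/pdf/1706.03762v7" title="pdf" rel="related"/>
  <category term="cs.CL" scheme="http://arxiv.org/schemas/atom"/>
</entry>`

const [paper] = parseArxivXML(sampleXml)
// paper.id         === '1706.03762v7'  (abs-URL prefix stripped)
// paper.summary    === 'The dominant sequence transduction models...'  (whitespace collapsed by cleanText)
// paper.authors    === ['Ashish Vaswani']
// paper.pdfLink    === 'http://arxiv.org/pdf/1706.03762v7'
// paper.categories === ['cs.CL']
```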
@@ -4,6 +4,7 @@ import {
   airtableListRecordsTool,
   airtableUpdateRecordTool,
 } from '@/tools/airtable'
+import { arxivGetAuthorPapersTool, arxivGetPaperTool, arxivSearchTool } from '@/tools/arxiv'
 import { browserUseRunTaskTool } from '@/tools/browser_use'
 import { clayPopulateTool } from '@/tools/clay'
 import { confluenceRetrieveTool, confluenceUpdateTool } from '@/tools/confluence'
@@ -125,12 +126,21 @@ import {
   wealthboxWriteTaskTool,
 } from '@/tools/wealthbox'
 import { whatsappSendMessageTool } from '@/tools/whatsapp'
+import {
+  wikipediaPageContentTool,
+  wikipediaPageSummaryTool,
+  wikipediaRandomPageTool,
+  wikipediaSearchTool,
+} from '@/tools/wikipedia'
 import { workflowExecutorTool } from '@/tools/workflow'
 import { xReadTool, xSearchTool, xUserTool, xWriteTool } from '@/tools/x'
 import { youtubeSearchTool } from '@/tools/youtube'

 // Registry of all available tools
 export const tools: Record<string, ToolConfig> = {
+  arxiv_search: arxivSearchTool,
+  arxiv_get_paper: arxivGetPaperTool,
+  arxiv_get_author_papers: arxivGetAuthorPapersTool,
   browser_use_run_task: browserUseRunTaskTool,
   openai_embeddings: openAIEmbeddings,
   http_request: httpRequest,
@@ -262,6 +272,10 @@ export const tools: Record<string, ToolConfig> = {
   wealthbox_write_task: wealthboxWriteTaskTool,
   wealthbox_read_note: wealthboxReadNoteTool,
   wealthbox_write_note: wealthboxWriteNoteTool,
+  wikipedia_summary: wikipediaPageSummaryTool,
+  wikipedia_search: wikipediaSearchTool,
+  wikipedia_content: wikipediaPageContentTool,
+  wikipedia_random: wikipediaRandomPageTool,
   qdrant_fetch_points: qdrantFetchTool,
   qdrant_search_vector: qdrantSearchTool,
   qdrant_upsert_points: qdrantUpsertTool,
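With the entries above registered, dispatch is a plain key lookup on `tools` (the object this hunk modifies); a minimal sketch:

```ts
// Resolve a tool by its string id, as an executor would.
const tool = tools['arxiv_search']
console.log(tool.name) // 'ArXiv Search'
```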

73  apps/sim/tools/wikipedia/content.ts  Normal file
@@ -0,0 +1,73 @@
import type { ToolConfig } from '@/tools/types'
import type {
  WikipediaPageContentParams,
  WikipediaPageContentResponse,
} from '@/tools/wikipedia/types'

export const pageContentTool: ToolConfig<WikipediaPageContentParams, WikipediaPageContentResponse> =
  {
    id: 'wikipedia_content',
    name: 'Wikipedia Page Content',
    description: 'Get the full HTML content of a Wikipedia page.',
    version: '1.0.0',

    params: {
      pageTitle: {
        type: 'string',
        required: true,
        visibility: 'user-or-llm',
        description: 'Title of the Wikipedia page to get content for',
      },
    },

    request: {
      url: (params: WikipediaPageContentParams) => {
        const encodedTitle = encodeURIComponent(params.pageTitle.replace(/ /g, '_'))
        return `https://en.wikipedia.org/api/rest_v1/page/html/${encodedTitle}`
      },
      method: 'GET',
      headers: () => ({
        'User-Agent': 'SimStudio/1.0 (https://simstudio.ai)',
        Accept:
          'text/html; charset=utf-8; profile="https://www.mediawiki.org/wiki/Specs/HTML/2.1.0"',
      }),
      isInternalRoute: false,
    },

    transformResponse: async (response: Response) => {
      if (!response.ok) {
        if (response.status === 404) {
          throw new Error('Wikipedia page not found')
        }
        throw new Error(`Wikipedia API error: ${response.status} ${response.statusText}`)
      }

      const html = await response.text()

      // Extract metadata from response headers
      const revision = response.headers.get('etag')?.match(/^"(\d+)/)?.[1] || '0'
      const timestamp = response.headers.get('last-modified') || new Date().toISOString()

      return {
        success: true,
        output: {
          content: {
            title: '', // Will be filled by the calling code
            pageid: 0, // Not available from this endpoint
            html: html,
            revision: Number.parseInt(revision, 10),
            tid: response.headers.get('etag') || '',
            timestamp: timestamp,
            content_model: 'wikitext',
            content_format: 'text/html',
          },
        },
      }
    },

    transformError: (error) => {
      return error instanceof Error
        ? error.message
        : 'An error occurred while retrieving the Wikipedia page content'
    },
  }
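A sketch of the title handling above (title illustrative): spaces become underscores before percent-encoding, matching Wikipedia's canonical page names:

```ts
// 'Grace Hopper' -> 'Grace_Hopper' -> REST path segment
pageContentTool.request.url({ pageTitle: 'Grace Hopper' })
// => https://en.wikipedia.org/api/rest_v1/page/html/Grace_Hopper
```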

9  apps/sim/tools/wikipedia/index.ts  Normal file
@@ -0,0 +1,9 @@
import { pageContentTool } from '@/tools/wikipedia/content'
import { randomPageTool } from '@/tools/wikipedia/random'
import { searchTool } from '@/tools/wikipedia/search'
import { pageSummaryTool } from '@/tools/wikipedia/summary'

export const wikipediaPageSummaryTool = pageSummaryTool
export const wikipediaSearchTool = searchTool
export const wikipediaPageContentTool = pageContentTool
export const wikipediaRandomPageTool = randomPageTool

55  apps/sim/tools/wikipedia/random.ts  Normal file
@@ -0,0 +1,55 @@
import type { ToolConfig } from '@/tools/types'
import type { WikipediaRandomPageResponse } from '@/tools/wikipedia/types'

export const randomPageTool: ToolConfig<Record<string, never>, WikipediaRandomPageResponse> = {
  id: 'wikipedia_random',
  name: 'Wikipedia Random Page',
  description: 'Get a random Wikipedia page.',
  version: '1.0.0',

  params: {},

  request: {
    url: () => {
      return 'https://en.wikipedia.org/api/rest_v1/page/random/summary'
    },
    method: 'GET',
    headers: () => ({
      'User-Agent': 'SimStudio/1.0 (https://simstudio.ai)',
      Accept: 'application/json',
    }),
    isInternalRoute: false,
  },

  transformResponse: async (response: Response) => {
    if (!response.ok) {
      throw new Error(`Wikipedia random page API error: ${response.status} ${response.statusText}`)
    }

    const data = await response.json()

    return {
      success: true,
      output: {
        randomPage: {
          type: data.type || '',
          title: data.title || '',
          displaytitle: data.displaytitle || data.title || '',
          description: data.description,
          extract: data.extract || '',
          thumbnail: data.thumbnail,
          content_urls: data.content_urls || { desktop: { page: '' }, mobile: { page: '' } },
          lang: data.lang || '',
          timestamp: data.timestamp || '',
          pageid: data.pageid || 0,
        },
      },
    }
  },

  transformError: (error) => {
    return error instanceof Error
      ? error.message
      : 'An error occurred while retrieving a random Wikipedia page'
  },
}

87  apps/sim/tools/wikipedia/search.ts  Normal file
@@ -0,0 +1,87 @@
import type { ToolConfig } from '@/tools/types'
import type { WikipediaSearchParams, WikipediaSearchResponse } from '@/tools/wikipedia/types'

export const searchTool: ToolConfig<WikipediaSearchParams, WikipediaSearchResponse> = {
  id: 'wikipedia_search',
  name: 'Wikipedia Search',
  description: 'Search for Wikipedia pages by title or content.',
  version: '1.0.0',

  params: {
    query: {
      type: 'string',
      required: true,
      visibility: 'user-or-llm',
      description: 'Search query to find Wikipedia pages',
    },
    searchLimit: {
      type: 'number',
      required: false,
      visibility: 'user-only',
      description: 'Maximum number of results to return (default: 10, max: 50)',
    },
  },

  request: {
    url: (params: WikipediaSearchParams) => {
      const baseUrl = 'https://en.wikipedia.org/w/api.php'
      const searchParams = new URLSearchParams()

      searchParams.append('action', 'opensearch')
      searchParams.append('search', params.query)
      searchParams.append('format', 'json')
      searchParams.append('namespace', '0')
      if (params.searchLimit) {
        searchParams.append('limit', Math.min(params.searchLimit, 50).toString())
      } else {
        searchParams.append('limit', '10')
      }

      return `${baseUrl}?${searchParams.toString()}`
    },
    method: 'GET',
    headers: () => ({
      'User-Agent': 'SimStudio/1.0 (https://simstudio.ai)',
      Accept: 'application/json',
    }),
    isInternalRoute: false,
  },

  transformResponse: async (response: Response) => {
    if (!response.ok) {
      throw new Error(`Wikipedia search API error: ${response.status} ${response.statusText}`)
    }

    const data = await response.json()

    // OpenSearch API returns [searchTerm, titles[], descriptions[], urls[]]
    if (!Array.isArray(data) || data.length < 4) {
      throw new Error('Invalid OpenSearch response format')
    }

    const [searchTerm, titles, descriptions, urls] = data
    const searchResults = titles.map((title: string, index: number) => ({
      id: index,
      key: title.replace(/ /g, '_'),
      title: title,
      excerpt: descriptions[index] || '',
      matched_title: title,
      description: descriptions[index] || '',
      thumbnail: undefined, // OpenSearch doesn't provide thumbnails
      url: urls[index] || '',
    }))

    return {
      success: true,
      output: {
        searchResults,
        totalHits: titles.length,
        query: searchTerm,
      },
    }
  },

  transformError: (error) => {
    return error instanceof Error ? error.message : 'An error occurred while searching Wikipedia'
  },
}
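For clarity, a sketch of the parallel-array shape the opensearch action returns and how the mapping above zips it (payload values illustrative):

```ts
// The MediaWiki opensearch action returns parallel arrays, not objects:
const data = [
  'turing',
  ['Alan Turing', 'Turing machine'],
  ['English mathematician and computer scientist', 'Abstract model of computation'],
  ['https://en.wikipedia.org/wiki/Alan_Turing', 'https://en.wikipedia.org/wiki/Turing_machine'],
]
// transformResponse zips them by index into WikipediaSearchResult objects:
// { id: 0, key: 'Alan_Turing', title: 'Alan Turing', excerpt: 'English mathematician...', url: '...' }
```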

78  apps/sim/tools/wikipedia/summary.ts  Normal file
@@ -0,0 +1,78 @@
import type { ToolConfig } from '@/tools/types'
import type {
  WikipediaPageSummaryParams,
  WikipediaPageSummaryResponse,
} from '@/tools/wikipedia/types'

export const pageSummaryTool: ToolConfig<WikipediaPageSummaryParams, WikipediaPageSummaryResponse> =
  {
    id: 'wikipedia_summary',
    name: 'Wikipedia Page Summary',
    description: 'Get a summary and metadata for a specific Wikipedia page.',
    version: '1.0.0',

    params: {
      pageTitle: {
        type: 'string',
        required: true,
        visibility: 'user-or-llm',
        description: 'Title of the Wikipedia page to get summary for',
      },
    },

    request: {
      url: (params: WikipediaPageSummaryParams) => {
        const encodedTitle = encodeURIComponent(params.pageTitle.replace(/ /g, '_'))
        return `https://en.wikipedia.org/api/rest_v1/page/summary/${encodedTitle}`
      },
      method: 'GET',
      headers: () => ({
        'User-Agent': 'SimStudio/1.0 (https://simstudio.ai)',
        Accept: 'application/json',
      }),
      isInternalRoute: false,
    },

    transformResponse: async (response: Response) => {
      if (!response.ok) {
        if (response.status === 404) {
          throw new Error('Wikipedia page not found')
        }
        throw new Error(`Wikipedia API error: ${response.status} ${response.statusText}`)
      }

      const data = await response.json()

      return {
        success: true,
        output: {
          summary: {
            type: data.type || '',
            title: data.title || '',
            displaytitle: data.displaytitle || data.title || '',
            description: data.description,
            extract: data.extract || '',
            extract_html: data.extract_html,
            thumbnail: data.thumbnail,
            originalimage: data.originalimage,
            content_urls: data.content_urls || {
              desktop: { page: '', revisions: '', edit: '', talk: '' },
              mobile: { page: '', revisions: '', edit: '', talk: '' },
            },
            lang: data.lang || '',
            dir: data.dir || 'ltr',
            timestamp: data.timestamp || '',
            pageid: data.pageid || 0,
            wikibase_item: data.wikibase_item,
            coordinates: data.coordinates,
          },
        },
      }
    },

    transformError: (error) => {
      return error instanceof Error
        ? error.message
        : 'An error occurred while retrieving the Wikipedia page summary'
    },
  }

146  apps/sim/tools/wikipedia/types.ts  Normal file
@@ -0,0 +1,146 @@
// Common types for Wikipedia tools
import type { ToolResponse } from '@/tools/types'

// Page Summary tool types
export interface WikipediaPageSummaryParams {
  pageTitle: string
}

export interface WikipediaPageSummary {
  type: string
  title: string
  displaytitle: string
  description?: string
  extract: string
  extract_html?: string
  thumbnail?: {
    source: string
    width: number
    height: number
  }
  originalimage?: {
    source: string
    width: number
    height: number
  }
  content_urls: {
    desktop: {
      page: string
      revisions: string
      edit: string
      talk: string
    }
    mobile: {
      page: string
      revisions: string
      edit: string
      talk: string
    }
  }
  lang: string
  dir: string
  timestamp: string
  pageid: number
  wikibase_item?: string
  coordinates?: {
    lat: number
    lon: number
  }
}

export interface WikipediaPageSummaryResponse extends ToolResponse {
  output: {
    summary: WikipediaPageSummary
  }
}

// Search Pages tool types
export interface WikipediaSearchParams {
  query: string
  searchLimit?: number
}

export interface WikipediaSearchResult {
  id: number
  key: string
  title: string
  excerpt: string
  matched_title?: string
  description?: string
  thumbnail?: {
    mimetype: string
    size?: number
    width: number
    height: number
    duration?: number
    url: string
  }
  url: string
}

export interface WikipediaSearchResponse extends ToolResponse {
  output: {
    searchResults: WikipediaSearchResult[]
    totalHits: number
    query: string
  }
}

// Get Page Content tool types
export interface WikipediaPageContentParams {
  pageTitle: string
}

export interface WikipediaPageContent {
  title: string
  pageid: number
  html: string
  revision: number
  tid: string
  timestamp: string
  content_model: string
  content_format: string
}

export interface WikipediaPageContentResponse extends ToolResponse {
  output: {
    content: WikipediaPageContent
  }
}

// Random Page tool types
export interface WikipediaRandomPage {
  type: string
  title: string
  displaytitle: string
  description?: string
  extract: string
  thumbnail?: {
    source: string
    width: number
    height: number
  }
  content_urls: {
    desktop: {
      page: string
    }
    mobile: {
      page: string
    }
  }
  lang: string
  timestamp: string
  pageid: number
}

export interface WikipediaRandomPageResponse extends ToolResponse {
  output: {
    randomPage: WikipediaRandomPage
  }
}

export type WikipediaResponse =
  | WikipediaPageSummaryResponse
  | WikipediaSearchResponse
  | WikipediaPageContentResponse
  | WikipediaRandomPageResponse
@@ -216,8 +216,13 @@ function findBlockType(content: string, blockName: string): string {

 // Helper to extract a string property from content
 function extractStringProperty(content: string, propName: string): string | null {
-  const simpleMatch = content.match(new RegExp(`${propName}\\s*:\\s*['"]([^'"]+)['"]`, 'm'))
-  if (simpleMatch) return simpleMatch[1]
+  // Try single quotes first - more permissive approach
+  const singleQuoteMatch = content.match(new RegExp(`${propName}\\s*:\\s*'([^']*)'`, 'm'))
+  if (singleQuoteMatch) return singleQuoteMatch[1]
+
+  // Try double quotes
+  const doubleQuoteMatch = content.match(new RegExp(`${propName}\\s*:\\s*"([^"]*)"`, 'm'))
+  if (doubleQuoteMatch) return doubleQuoteMatch[1]

   // Try to match multi-line string with template literals
   const templateMatch = content.match(new RegExp(`${propName}\\s*:\\s*\`([^\`]+)\``, 's'))
@@ -556,7 +561,10 @@ function extractToolInfo(
   const requiredMatch = paramBlock.match(/required\s*:\s*(true|false)/)

   // More careful extraction of description with handling for multiline descriptions
-  let descriptionMatch = paramBlock.match(/description\s*:\s*['"]([^'"]+)['"]/)
+  let descriptionMatch = paramBlock.match(/description\s*:\s*'([^']*)'/)
+  if (!descriptionMatch) {
+    descriptionMatch = paramBlock.match(/description\s*:\s*"([^"]*)"/)
+  }
   if (!descriptionMatch) {
     // Try for template literals if the description uses backticks
     descriptionMatch = paramBlock.match(/description\s*:\s*`([^`]+)`/)
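The quote-class fix above matters because the old pattern accepted mismatched quote pairs; a minimal illustration (input invented for the example):

```ts
const paramBlock = `description: "The paper's title"`

// Old pattern: [^'"]+ stops at the apostrophe and ['"] closes on it,
// silently truncating the captured description to "The paper".
paramBlock.match(/description\s*:\s*['"]([^'"]+)['"]/)?.[1] // => "The paper"

// New approach: one quote style per pattern, so the full string survives.
paramBlock.match(/description\s*:\s*'([^']*)'/)?.[1] // => undefined (no single-quoted match)
paramBlock.match(/description\s*:\s*"([^"]*)"/)?.[1] // => "The paper's title"
```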