Mirror of https://github.com/simstudioai/sim.git, synced 2026-01-09 15:07:55 -05:00
fix(tools): fix perplexity & parallel ai tag dropdown inaccuracies (#2300)
* fix(tools): fix perplexity & parallel ai tag dropdown inaccuracies
* fixed stt, tts and added output conditions to conditionally display tag dropdown values based on other subblock values
* updated exa to match latest API
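The central addition is a `condition` field on block output definitions, reusing the existing subblock condition format so outputs only appear in the tag dropdown when the relevant subblock values are set. As a quick sketch of the shape (lifted from the SttBlock changes further down in this diff, wrapped in a `const` only so it stands alone):

```ts
// Sketch of a conditional block output, shaped like the SttBlock diff below.
// `segments` is only surfaced when the `timestamps` subblock is anything
// other than 'none'.
const outputs = {
  transcript: { type: 'string', description: 'Full transcribed text' },
  segments: {
    type: 'array',
    description: 'Timestamped segments with speaker labels',
    condition: { field: 'timestamps', value: 'none', not: true },
  },
}
```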
@@ -49,11 +49,11 @@ Search the web using Exa AI. Returns relevant search results with titles, URLs,
| `type` | string | No | Search type: neural, keyword, auto or fast \(default: auto\) |
| `includeDomains` | string | No | Comma-separated list of domains to include in results |
| `excludeDomains` | string | No | Comma-separated list of domains to exclude from results |
| `category` | string | No | Filter by category: company, research_paper, news_article, pdf, github, tweet, movie, song, personal_site |
| `category` | string | No | Filter by category: company, research paper, news, pdf, github, tweet, personal site, linkedin profile, financial report |
| `text` | boolean | No | Include full text content in results \(default: false\) |
| `highlights` | boolean | No | Include highlighted snippets in results \(default: false\) |
| `summary` | boolean | No | Include AI-generated summaries in results \(default: false\) |
| `livecrawl` | string | No | Live crawling mode: always, fallback, or never \(default: never\) |
| `livecrawl` | string | No | Live crawling mode: never \(default\), fallback, always, or preferred \(always try livecrawl, fall back to cache if fails\) |
| `apiKey` | string | Yes | Exa AI API Key |

#### Output
@@ -76,7 +76,7 @@ Retrieve the contents of webpages using Exa AI. Returns the title, text content,
| `subpages` | number | No | Number of subpages to crawl from the provided URLs |
| `subpageTarget` | string | No | Comma-separated keywords to target specific subpages \(e.g., "docs,tutorial,about"\) |
| `highlights` | boolean | No | Include highlighted snippets in results \(default: false\) |
| `livecrawl` | string | No | Live crawling mode: always, fallback, or never \(default: never\) |
| `livecrawl` | string | No | Live crawling mode: never \(default\), fallback, always, or preferred \(always try livecrawl, fall back to cache if fails\) |
| `apiKey` | string | Yes | Exa AI API Key |

#### Output
@@ -99,10 +99,9 @@ Find webpages similar to a given URL using Exa AI. Returns a list of similar lin
| `includeDomains` | string | No | Comma-separated list of domains to include in results |
| `excludeDomains` | string | No | Comma-separated list of domains to exclude from results |
| `excludeSourceDomain` | boolean | No | Exclude the source domain from results \(default: false\) |
| `category` | string | No | Filter by category: company, research_paper, news_article, pdf, github, tweet, movie, song, personal_site |
| `highlights` | boolean | No | Include highlighted snippets in results \(default: false\) |
| `summary` | boolean | No | Include AI-generated summaries in results \(default: false\) |
| `livecrawl` | string | No | Live crawling mode: always, fallback, or never \(default: never\) |
| `livecrawl` | string | No | Live crawling mode: never \(default\), fallback, always, or preferred \(always try livecrawl, fall back to cache if fails\) |
| `apiKey` | string | Yes | Exa AI API Key |

#### Output
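For a rough sense of how the updated Exa parameters in the tables above fit together, here is a hypothetical `exa_search` parameter object. The `query` field and the overall object shape are assumptions for illustration; the option values come from the tables.

```ts
// Hypothetical exa_search params; `query` and the shape are assumed,
// the option values mirror the updated parameter tables above.
const exaSearchParams = {
  query: 'open source workflow automation',
  type: 'auto', // neural, keyword, auto or fast (default: auto)
  category: 'news', // e.g. company, research paper, news, linkedin profile, ...
  livecrawl: 'preferred', // never (default), fallback, always, or preferred
  text: true, // include full text content in results
  apiKey: 'YOUR_EXA_API_KEY',
}
```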
@@ -94,11 +94,11 @@ Conduct comprehensive deep research across the web using Parallel AI. Synthesize

| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `status` | string | Task status \(running, completed, failed\) |
| `status` | string | Task status \(completed, failed\) |
| `run_id` | string | Unique ID for this research task |
| `message` | string | Status message \(for running tasks\) |
| `message` | string | Status message |
| `content` | object | Research results \(structured based on output_schema\) |
| `basis` | array | Citations and sources with excerpts and confidence levels |
| `basis` | array | Citations and sources with reasoning and confidence levels |
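To make the updated `basis` row concrete, a single entry now groups citations under an output field together with reasoning and a confidence indicator. The sketch below is assembled from the `parallel_deep_research` outputs schema later in this diff, not from a live API response.

```ts
// Illustrative basis entry; field names mirror the deep research outputs
// schema further down (field, citations, reasoning, confidence).
const basisEntry = {
  field: 'summary',
  citations: [
    {
      url: 'https://example.com/source',
      title: 'Example source',
      excerpts: ['Relevant excerpt from the source...'],
    },
  ],
  reasoning: 'Why these sources support the reported result',
  confidence: 'high',
}
```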
@@ -54,8 +54,9 @@ Generate completions using Perplexity AI chat models

| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Operation success status |
| `output` | object | Chat completion results |
| `content` | string | Generated text content |
| `model` | string | Model used for generation |
| `usage` | object | Token usage information |

### `perplexity_search`

@@ -79,8 +80,7 @@ Get ranked search results from Perplexity

| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Operation success status |
| `output` | object | Search results |
| `results` | array | Array of search results |
@@ -227,16 +227,16 @@ export async function POST(request: NextRequest) {

    logger.info(`[${requestId}] Transcription completed successfully`)

    return NextResponse.json({
      transcript,
      segments,
      language: detectedLanguage,
      duration,
      confidence,
      sentiment: sentimentResults,
      entities,
      summary,
    })
    const response: Record<string, any> = { transcript }
    if (segments !== undefined) response.segments = segments
    if (detectedLanguage !== undefined) response.language = detectedLanguage
    if (duration !== undefined) response.duration = duration
    if (confidence !== undefined) response.confidence = confidence
    if (sentimentResults !== undefined) response.sentiment = sentimentResults
    if (entities !== undefined) response.entities = entities
    if (summary !== undefined) response.summary = summary

    return NextResponse.json(response)
  } catch (error) {
    logger.error(`[${requestId}] STT proxy error:`, error)
    const errorMessage = error instanceof Error ? error.message : 'Unknown error'
@@ -277,11 +277,11 @@ async function transcribeWithWhisper(
    formData.append('temperature', temperature.toString())
  }

  formData.append('response_format', 'verbose_json')

  if (timestamps === 'word') {
    formData.append('response_format', 'verbose_json')
    formData.append('timestamp_granularities', 'word')
  } else if (timestamps === 'sentence') {
    formData.append('response_format', 'verbose_json')
    formData.append('timestamp_granularities', 'segment')
  }
@@ -302,17 +302,14 @@ async function transcribeWithWhisper(

  const data = await response.json()

  if (timestamps === 'none') {
    return {
      transcript: data.text,
      language: data.language,
    }
  let segments: TranscriptSegment[] | undefined
  if (timestamps !== 'none') {
    segments = (data.segments || data.words || []).map((seg: any) => ({
      text: seg.text,
      start: seg.start,
      end: seg.end,
    }))
  }
  const segments: TranscriptSegment[] = (data.segments || data.words || []).map((seg: any) => ({
    text: seg.text,
    start: seg.start,
    end: seg.end,
  }))

  return {
    transcript: data.text,
@@ -110,14 +110,14 @@ export const ParallelBlock: BlockConfig<ToolResponse> = {
      title: 'Processor',
      type: 'dropdown',
      options: [
        { label: 'Lite ($5/1K)', id: 'lite' },
        { label: 'Base ($10/1K)', id: 'base' },
        { label: 'Core ($25/1K)', id: 'core' },
        { label: 'Core 2x ($50/1K)', id: 'core2x' },
        { label: 'Pro ($100/1K)', id: 'pro' },
        { label: 'Ultra ($300/1K)', id: 'ultra' },
        { label: 'Ultra 2x ($600/1K)', id: 'ultra2x' },
        { label: 'Ultra 4x ($1,200/1K)', id: 'ultra4x' },
        { label: 'Lite', id: 'lite' },
        { label: 'Base', id: 'base' },
        { label: 'Core', id: 'core' },
        { label: 'Core 2x', id: 'core2x' },
        { label: 'Pro', id: 'pro' },
        { label: 'Ultra', id: 'ultra' },
        { label: 'Ultra 2x', id: 'ultra2x' },
        { label: 'Ultra 4x', id: 'ultra4x' },
      ],
      value: () => 'base',
      condition: { field: 'operation', value: ['search', 'deep_research'] },
@@ -304,15 +304,44 @@ export const SttBlock: BlockConfig<SttBlockResponse> = {

  outputs: {
    transcript: { type: 'string', description: 'Full transcribed text' },
    segments: { type: 'array', description: 'Timestamped segments with speaker labels' },
    segments: {
      type: 'array',
      description: 'Timestamped segments with speaker labels',
      condition: { field: 'timestamps', value: 'none', not: true },
    },
    language: { type: 'string', description: 'Detected or specified language' },
    duration: { type: 'number', description: 'Audio duration in seconds' },
    confidence: {
      type: 'number',
      description: 'Overall confidence score (Deepgram, AssemblyAI only)',
      description: 'Overall confidence score',
      condition: { field: 'provider', value: ['deepgram', 'assemblyai', 'gemini'] },
    },
    sentiment: {
      type: 'array',
      description: 'Sentiment analysis results',
      condition: {
        field: 'provider',
        value: 'assemblyai',
        and: { field: 'sentiment', value: true },
      },
    },
    entities: {
      type: 'array',
      description: 'Detected entities',
      condition: {
        field: 'provider',
        value: 'assemblyai',
        and: { field: 'entityDetection', value: true },
      },
    },
    summary: {
      type: 'string',
      description: 'Auto-generated summary',
      condition: {
        field: 'provider',
        value: 'assemblyai',
        and: { field: 'summarization', value: true },
      },
    },
    sentiment: { type: 'array', description: 'Sentiment analysis results (AssemblyAI only)' },
    entities: { type: 'array', description: 'Detected entities (AssemblyAI only)' },
    summary: { type: 'string', description: 'Auto-generated summary (AssemblyAI only)' },
  },
}
@@ -579,7 +579,11 @@ export const TtsBlock: BlockConfig<TtsBlockResponse> = {
  outputs: {
    audioUrl: { type: 'string', description: 'URL to the generated audio file' },
    audioFile: { type: 'json', description: 'Generated audio file object (UserFile)' },
    duration: { type: 'number', description: 'Audio duration in seconds' },
    duration: {
      type: 'number',
      description: 'Audio duration in seconds',
      condition: { field: 'provider', value: 'deepgram' },
    },
    characterCount: { type: 'number', description: 'Number of characters processed' },
    format: { type: 'string', description: 'Audio format' },
    provider: { type: 'string', description: 'TTS provider used' },
@@ -116,9 +116,33 @@ export type BlockOutput =
  | PrimitiveValueType
  | { [key: string]: PrimitiveValueType | Record<string, any> }

/**
 * Condition for showing an output field.
 * Uses the same pattern as SubBlockConfig.condition
 */
export interface OutputCondition {
  field: string
  value: string | number | boolean | Array<string | number | boolean>
  not?: boolean
  and?: {
    field: string
    value: string | number | boolean | Array<string | number | boolean> | undefined
    not?: boolean
  }
}

export type OutputFieldDefinition =
  | PrimitiveValueType
  | { type: PrimitiveValueType; description?: string }
  | {
      type: PrimitiveValueType
      description?: string
      /**
       * Optional condition for when this output should be shown.
       * If not specified, the output is always shown.
       * Uses the same condition format as subBlocks.
       */
      condition?: OutputCondition
    }

export interface ParamConfig {
  type: ParamType
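To make the new types concrete, here is a small sketch of a conditional output declared against `OutputFieldDefinition`. The values are copied from the SttBlock change earlier in this diff; the explicit type annotation and standalone constant are only for illustration.

```ts
import type { OutputFieldDefinition } from '@/blocks/types'

// Only shown when the provider subblock is 'assemblyai' AND the sentiment
// subblock is enabled, per the condition semantics defined above.
const sentimentOutput: OutputFieldDefinition = {
  type: 'array',
  description: 'Sentiment analysis results',
  condition: {
    field: 'provider',
    value: 'assemblyai',
    and: { field: 'sentiment', value: true },
  },
}
```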
@@ -11,11 +11,80 @@ import {
  USER_FILE_PROPERTY_TYPES,
} from '@/lib/workflows/types'
import { getBlock } from '@/blocks'
import type { BlockConfig } from '@/blocks/types'
import type { BlockConfig, OutputCondition } from '@/blocks/types'
import { getTrigger, isTriggerValid } from '@/triggers'

type OutputDefinition = Record<string, any>

/**
 * Evaluates an output condition against subBlock values.
 * Returns true if the condition is met and the output should be shown.
 */
function evaluateOutputCondition(
  condition: OutputCondition,
  subBlocks: Record<string, any> | undefined
): boolean {
  if (!subBlocks) return false

  const fieldValue = subBlocks[condition.field]?.value

  let matches: boolean
  if (Array.isArray(condition.value)) {
    matches = condition.value.includes(fieldValue)
  } else {
    matches = fieldValue === condition.value
  }

  if (condition.not) {
    matches = !matches
  }

  if (condition.and) {
    const andFieldValue = subBlocks[condition.and.field]?.value
    let andMatches: boolean

    if (Array.isArray(condition.and.value)) {
      andMatches = condition.and.value.includes(andFieldValue)
    } else {
      andMatches = andFieldValue === condition.and.value
    }

    if (condition.and.not) {
      andMatches = !andMatches
    }

    matches = matches && andMatches
  }

  return matches
}

/**
 * Filters outputs based on their conditions.
 * Returns a new OutputDefinition with only the outputs whose conditions are met.
 */
function filterOutputsByCondition(
  outputs: OutputDefinition,
  subBlocks: Record<string, any> | undefined
): OutputDefinition {
  const filtered: OutputDefinition = {}

  for (const [key, value] of Object.entries(outputs)) {
    if (!value || typeof value !== 'object' || !('condition' in value)) {
      filtered[key] = value
      continue
    }

    const condition = value.condition as OutputCondition | undefined
    if (!condition || evaluateOutputCondition(condition, subBlocks)) {
      const { condition: _, ...rest } = value
      filtered[key] = rest
    }
  }

  return filtered
}

const CHAT_OUTPUTS: OutputDefinition = {
  input: { type: 'string', description: 'User message' },
  conversationId: { type: 'string', description: 'Conversation ID' },

@@ -184,7 +253,8 @@ export function getBlockOutputs(
  }

  const baseOutputs = { ...(blockConfig.outputs || {}) }
  return applyInputFormatToOutputs(blockType, blockConfig, subBlocks, baseOutputs)
  const filteredOutputs = filterOutputsByCondition(baseOutputs, subBlocks)
  return applyInputFormatToOutputs(blockType, blockConfig, subBlocks, filteredOutputs)
}

function shouldFilterReservedField(
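A quick usage sketch of the filtering above (hypothetical values; subblock state is assumed to be stored as `{ value }` records, as the lookups in `evaluateOutputCondition` imply):

```ts
// Hypothetical outputs map with one conditional entry.
const outputs = {
  transcript: { type: 'string', description: 'Full transcribed text' },
  confidence: {
    type: 'number',
    description: 'Overall confidence score',
    condition: { field: 'provider', value: ['deepgram', 'assemblyai', 'gemini'] },
  },
}

// With provider set to 'whisper', the conditional output is dropped; entries
// without a condition always pass through, and the condition key is stripped
// from the ones that remain.
const visible = filterOutputsByCondition(outputs, { provider: { value: 'whisper' } })
// -> { transcript: { type: 'string', description: 'Full transcribed text' } }
```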
@@ -48,13 +48,6 @@ export const findSimilarLinksTool: ToolConfig<
      visibility: 'user-only',
      description: 'Exclude the source domain from results (default: false)',
    },
    category: {
      type: 'string',
      required: false,
      visibility: 'user-only',
      description:
        'Filter by category: company, research_paper, news_article, pdf, github, tweet, movie, song, personal_site',
    },
    highlights: {
      type: 'boolean',
      required: false,

@@ -71,7 +64,8 @@ export const findSimilarLinksTool: ToolConfig<
      type: 'string',
      required: false,
      visibility: 'user-only',
      description: 'Live crawling mode: always, fallback, or never (default: never)',
      description:
        'Live crawling mode: never (default), fallback, always, or preferred (always try livecrawl, fall back to cache if fails)',
    },
    apiKey: {
      type: 'string',

@@ -113,9 +107,6 @@ export const findSimilarLinksTool: ToolConfig<
      body.excludeSourceDomain = params.excludeSourceDomain
    }

    // Category filtering
    if (params.category) body.category = params.category

    // Content options - build contents object
    const contents: Record<string, any> = {}
    if (params.text !== undefined) contents.text = params.text
@@ -51,7 +51,8 @@ export const getContentsTool: ToolConfig<ExaGetContentsParams, ExaGetContentsRes
      type: 'string',
      required: false,
      visibility: 'user-only',
      description: 'Live crawling mode: always, fallback, or never (default: never)',
      description:
        'Live crawling mode: never (default), fallback, always, or preferred (always try livecrawl, fall back to cache if fails)',
    },
    apiKey: {
      type: 'string',
@@ -50,7 +50,7 @@ export const searchTool: ToolConfig<ExaSearchParams, ExaSearchResponse> = {
      required: false,
      visibility: 'user-only',
      description:
        'Filter by category: company, research_paper, news_article, pdf, github, tweet, movie, song, personal_site',
        'Filter by category: company, research paper, news, pdf, github, tweet, personal site, linkedin profile, financial report',
    },
    text: {
      type: 'boolean',

@@ -74,7 +74,8 @@ export const searchTool: ToolConfig<ExaSearchParams, ExaSearchResponse> = {
      type: 'string',
      required: false,
      visibility: 'user-only',
      description: 'Live crawling mode: always, fallback, or never (default: never)',
      description:
        'Live crawling mode: never (default), fallback, always, or preferred (always try livecrawl, fall back to cache if fails)',
    },
    apiKey: {
      type: 'string',
@@ -1,6 +1,9 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { ParallelDeepResearchParams } from '@/tools/parallel/types'
import type { ToolConfig, ToolResponse } from '@/tools/types'

const logger = createLogger('ParallelDeepResearchTool')

export const deepResearchTool: ToolConfig<ParallelDeepResearchParams, ToolResponse> = {
  id: 'parallel_deep_research',
  name: 'Parallel AI Deep Research',
@@ -90,34 +93,83 @@ export const deepResearchTool: ToolConfig<ParallelDeepResearchParams, ToolRespon
  transformResponse: async (response: Response) => {
    const data = await response.json()

    if (data.status === 'running') {
      return {
        success: true,
        output: {
          status: 'running',
          run_id: data.run_id,
          message:
            'Deep research task is running. This can take up to 15 minutes. Use the run_id to check status.',
        },
      }
    }

    return {
      success: true,
      output: {
        status: data.status,
        run_id: data.run_id,
        content: data.content || {},
        basis: data.basis || [],
        metadata: data.metadata || {},
        status: data.status,
        message: `Research task ${data.status}, waiting for completion...`,
        content: {},
        basis: [],
      },
    }
  },

  postProcess: async (result, params) => {
    if (!result.success) {
      return result
    }

    const runId = result.output.run_id
    if (!runId) {
      return {
        ...result,
        success: false,
        error: 'No run_id returned from task creation',
      }
    }

    logger.info(`Parallel AI deep research task ${runId} created, fetching results...`)

    try {
      const resultResponse = await fetch(`https://api.parallel.ai/v1/tasks/runs/${runId}/result`, {
        method: 'GET',
        headers: {
          'x-api-key': params.apiKey,
          'Content-Type': 'application/json',
        },
      })

      if (!resultResponse.ok) {
        const errorText = await resultResponse.text()
        throw new Error(`Failed to get task result: ${resultResponse.status} - ${errorText}`)
      }

      const taskResult = await resultResponse.json()
      logger.info(`Parallel AI deep research task ${runId} completed`)

      const output = taskResult.output || {}
      const run = taskResult.run || {}

      return {
        success: true,
        output: {
          status: run.status || 'completed',
          run_id: runId,
          message: 'Research completed successfully',
          content: output.content || {},
          basis: output.basis || [],
        },
      }
    } catch (error: unknown) {
      const errorMessage = error instanceof Error ? error.message : 'Unknown error'
      logger.error('Error fetching research task result:', {
        message: errorMessage,
        runId,
      })

      return {
        ...result,
        success: false,
        error: `Error fetching research task result: ${errorMessage}`,
      }
    }
  },

  outputs: {
    status: {
      type: 'string',
      description: 'Task status (running, completed, failed)',
      description: 'Task status (completed, failed)',
    },
    run_id: {
      type: 'string',
@@ -125,7 +177,7 @@ export const deepResearchTool: ToolConfig<ParallelDeepResearchParams, ToolRespon
    },
    message: {
      type: 'string',
      description: 'Status message (for running tasks)',
      description: 'Status message',
    },
    content: {
      type: 'object',
@@ -133,20 +185,27 @@ export const deepResearchTool: ToolConfig<ParallelDeepResearchParams, ToolRespon
    },
    basis: {
      type: 'array',
      description: 'Citations and sources with excerpts and confidence levels',
      description: 'Citations and sources with reasoning and confidence levels',
      items: {
        type: 'object',
        properties: {
          url: { type: 'string', description: 'Source URL' },
          title: { type: 'string', description: 'Source title' },
          excerpt: { type: 'string', description: 'Relevant excerpt' },
          confidence: { type: 'number', description: 'Confidence level' },
          field: { type: 'string', description: 'Output field name' },
          reasoning: { type: 'string', description: 'Explanation for the result' },
          citations: {
            type: 'array',
            description: 'Array of sources',
            items: {
              type: 'object',
              properties: {
                url: { type: 'string', description: 'Source URL' },
                title: { type: 'string', description: 'Source title' },
                excerpts: { type: 'array', description: 'Relevant excerpts from the source' },
              },
            },
          },
          confidence: { type: 'string', description: 'Confidence level indicator' },
        },
      },
    },
    metadata: {
      type: 'object',
      description: 'Additional task metadata',
    },
  },
}
@@ -105,25 +105,18 @@ export const chatTool: ToolConfig<PerplexityChatParams, PerplexityChatResponse>
  },

  outputs: {
    success: { type: 'boolean', description: 'Operation success status' },
    output: {
    content: { type: 'string', description: 'Generated text content' },
    model: { type: 'string', description: 'Model used for generation' },
    usage: {
      type: 'object',
      description: 'Chat completion results',
      description: 'Token usage information',
      properties: {
        content: { type: 'string', description: 'Generated text content' },
        model: { type: 'string', description: 'Model used for generation' },
        usage: {
          type: 'object',
          description: 'Token usage information',
          properties: {
            prompt_tokens: { type: 'number', description: 'Number of tokens in the prompt' },
            completion_tokens: {
              type: 'number',
              description: 'Number of tokens in the completion',
            },
            total_tokens: { type: 'number', description: 'Total number of tokens used' },
          },
        prompt_tokens: { type: 'number', description: 'Number of tokens in the prompt' },
        completion_tokens: {
          type: 'number',
          description: 'Number of tokens in the completion',
        },
        total_tokens: { type: 'number', description: 'Total number of tokens used' },
      },
    },
  },
@@ -126,29 +126,22 @@ export const searchTool: ToolConfig<PerplexitySearchParams, PerplexitySearchResp
  },

  outputs: {
    success: { type: 'boolean', description: 'Operation success status' },
    output: {
      type: 'object',
      description: 'Search results',
      properties: {
        results: {
          type: 'array',
          description: 'Array of search results',
          items: {
            type: 'object',
            properties: {
              title: { type: 'string', description: 'Title of the search result' },
              url: { type: 'string', description: 'URL of the search result' },
              snippet: { type: 'string', description: 'Brief excerpt or summary of the content' },
              date: {
                type: 'string',
                description: "Date the page was crawled and added to Perplexity's index",
              },
              last_updated: {
                type: 'string',
                description: "Date the page was last updated in Perplexity's index",
              },
            },
    results: {
      type: 'array',
      description: 'Array of search results',
      items: {
        type: 'object',
        properties: {
          title: { type: 'string', description: 'Title of the search result' },
          url: { type: 'string', description: 'URL of the search result' },
          snippet: { type: 'string', description: 'Brief excerpt or summary of the content' },
          date: {
            type: 'string',
            description: "Date the page was crawled and added to Perplexity's index",
          },
          last_updated: {
            type: 'string',
            description: "Date the page was last updated in Perplexity's index",
          },
        },
      },