fix(router): change router block content to prompt (#1261)

* fix(router): replace the router block's `content` output with `prompt`

* fixed router
This commit is contained in:
Waleed
2025-09-05 13:39:04 -07:00
committed by GitHub
parent abb835d22d
commit 3811b509ef
4 changed files with 10 additions and 11 deletions

View File

@@ -117,7 +117,7 @@ Your API key for the selected LLM provider. This is securely stored and used for
After a router makes a decision, you can access its outputs:
- **`<router.content>`**: Summary of the routing decision made
- **`<router.prompt>`**: Summary of the routing prompt used
- **`<router.selected_path>`**: Details of the chosen destination block
- **`<router.tokens>`**: Token usage statistics from the LLM
- **`<router.model>`**: The model used for decision-making
@@ -182,7 +182,7 @@ Confidence Threshold: 0.7 // Minimum confidence for routing
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>router.content</strong>: Summary of routing decision
<strong>router.prompt</strong>: Summary of routing prompt used
</li>
<li>
<strong>router.selected_path</strong>: Details of chosen destination

View File

@@ -18,7 +18,7 @@ const getCurrentOllamaModels = () => {
interface RouterResponse extends ToolResponse {
output: {
content: string
prompt: string
model: string
tokens?: {
prompt?: number
@@ -198,7 +198,6 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
hidden: true,
min: 0,
max: 2,
value: () => '0.1',
},
{
id: 'systemPrompt',
@@ -246,7 +245,7 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
},
},
outputs: {
content: { type: 'string', description: 'Routing response content' },
prompt: { type: 'string', description: 'Routing prompt used' },
model: { type: 'string', description: 'Model used' },
tokens: { type: 'json', description: 'Token usage' },
cost: { type: 'json', description: 'Cost information' },

View File

@@ -119,7 +119,7 @@ describe('RouterBlockHandler', () => {
const inputs = {
prompt: 'Choose the best option.',
model: 'gpt-4o',
temperature: 0.5,
temperature: 0.1,
}
const expectedTargetBlocks = [
@@ -168,11 +168,11 @@ describe('RouterBlockHandler', () => {
model: 'gpt-4o',
systemPrompt: 'Generated System Prompt',
context: JSON.stringify([{ role: 'user', content: 'Choose the best option.' }]),
temperature: 0.5,
temperature: 0.1,
})
expect(result).toEqual({
content: 'Choose the best option.',
prompt: 'Choose the best option.',
model: 'mock-model',
tokens: { prompt: 100, completion: 5, total: 105 },
cost: {
@@ -233,7 +233,7 @@ describe('RouterBlockHandler', () => {
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
model: 'gpt-4o',
temperature: 0,
temperature: 0.1,
})
})

View File

@@ -51,7 +51,7 @@ export class RouterBlockHandler implements BlockHandler {
model: routerConfig.model,
systemPrompt: systemPrompt,
context: JSON.stringify(messages),
temperature: routerConfig.temperature,
temperature: 0.1,
apiKey: routerConfig.apiKey,
workflowId: context.workflowId,
}
@@ -102,7 +102,7 @@ export class RouterBlockHandler implements BlockHandler {
)
return {
content: inputs.prompt,
prompt: inputs.prompt,
model: result.model,
tokens: {
prompt: tokens.prompt || 0,