fix(backend): Fix conversation history propagation on SmartDecisionBlock & AIBlock

commit a024b9a398
parent 83d879ea65
Author: Zamil Majdy
Date:   2025-03-02 05:49:40 +07:00
3 changed files with 9 additions and 3 deletions


@@ -651,7 +651,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         logger.debug(f"Calling LLM with input data: {input_data}")
-        prompt = [p.model_dump() for p in input_data.conversation_history]
+        prompt = [json.to_dict(p) for p in input_data.conversation_history]
 
         def trim_prompt(s: str) -> str:
             lines = s.strip().split("\n")
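The one-line change above is the core of the fix: conversation history entries are not guaranteed to be pydantic models, so calling `p.model_dump()` fails as soon as a plain dict or a raw JSON string shows up in the history. Below is a minimal sketch of the failure mode and the tolerant conversion; the `Message` model and the mixed `history` list are hypothetical stand-ins, and only `to_dict` mirrors this commit (see the last hunk):

    from json import loads

    from fastapi.encoders import jsonable_encoder
    from pydantic import BaseModel


    class Message(BaseModel):  # hypothetical stand-in for a history entry model
        role: str
        content: str


    def to_dict(data) -> dict:
        # Mirrors the updated backend.util.json.to_dict at the end of this commit.
        if isinstance(data, BaseModel):
            data = data.model_dump()
        elif isinstance(data, str):
            data = loads(data)
        return jsonable_encoder(data)


    history = [
        Message(role="user", content="hi"),         # pydantic model
        {"role": "assistant", "content": "hello"},  # plain dict
        '{"role": "user", "content": "bye"}',       # raw JSON string
    ]

    # Old code: [p.model_dump() for p in history] raises AttributeError on the
    # dict and the string entries. The tolerant converter handles all three:
    prompt = [to_dict(p) for p in history]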


@@ -1,4 +1,3 @@
-import json
 import logging
 import re
 from typing import TYPE_CHECKING, Any
@@ -16,6 +15,7 @@ from backend.data.block import (
     get_block,
 )
 from backend.data.model import SchemaField
+from backend.util import json
 
 if TYPE_CHECKING:
     from backend.data.graph import Link, Node
@@ -287,7 +287,7 @@ class SmartDecisionMakerBlock(Block):
     ) -> BlockOutput:
         tool_functions = self._create_function_signature(node_id)
-        prompt = [p.model_dump() for p in input_data.conversation_history]
+        prompt = [json.to_dict(p) for p in input_data.conversation_history]
 
         values = input_data.prompt_values
         if values:
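Worth noting with the two import hunks above: this module previously bound the name `json` to the stdlib module, and `from backend.util import json` rebinds that same name to the util wrapper, so any remaining `json.*` call in the file now resolves against `backend.util.json`. A small shadowing illustration follows; the `_UtilJson` stand-in is hypothetical, and only `to_dict` is known from this commit:

    import json as _stdlib_json

    # Before the change: `import json` made the stdlib module available as `json`.
    json = _stdlib_json
    print(json.dumps({"a": 1}))  # '{"a": 1}'

    # After the change: `from backend.util import json` rebinds the name.
    class _UtilJson:  # hypothetical shape standing in for backend.util.json
        @staticmethod
        def to_dict(data) -> dict:
            return data if isinstance(data, dict) else _stdlib_json.loads(data)

    json = _UtilJson
    print(json.to_dict('{"a": 1}'))  # {'a': 1}
    # json.dumps(...) would now fail unless backend.util.json also exposes dumps.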


@@ -9,6 +9,12 @@ from .type import type_match
 def to_dict(data) -> dict:
+    if isinstance(data, BaseModel):
+        data = data.model_dump()
+    elif isinstance(data, str):
+        data = loads(data)
     return jsonable_encoder(data)
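Running `jsonable_encoder` last, rather than returning `model_dump()` directly, keeps nested non-JSON values safe: datetimes, enums, and models nested inside a dict are still converted. A quick illustration, assuming the encoder here is FastAPI's `jsonable_encoder` and using a hypothetical `ToolCall` model:

    from datetime import datetime

    from fastapi.encoders import jsonable_encoder
    from pydantic import BaseModel


    class ToolCall(BaseModel):  # hypothetical model for illustration
        name: str
        at: datetime


    # A plain dict still needs encoding when it holds non-JSON-native values:
    payload = {"call": ToolCall(name="search", at=datetime(2025, 3, 2))}
    print(jsonable_encoder(payload))
    # {'call': {'name': 'search', 'at': '2025-03-02T00:00:00'}}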