Merge branch 'dev' into hackathon/copilot

Author: Zamil Majdy (committed by GitHub)
Date: 2026-01-07 03:34:01 +07:00
18 changed files with 3152 additions and 55 deletions


@@ -39,7 +39,7 @@ import backend.data.user
 import backend.integrations.webhooks.utils
 import backend.util.service
 import backend.util.settings
-from backend.blocks.llm import LlmModel
+from backend.blocks.llm import DEFAULT_LLM_MODEL
 from backend.data.model import Credentials
 from backend.integrations.providers import ProviderName
 from backend.monitoring.instrumentation import instrument_fastapi
@@ -113,7 +113,7 @@ async def lifespan_context(app: fastapi.FastAPI):
     await backend.data.user.migrate_and_encrypt_user_integrations()
     await backend.data.graph.fix_llm_provider_credentials()
-    await backend.data.graph.migrate_llm_models(LlmModel.GPT4O)
+    await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
     await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
     with launch_darkly_context():


@@ -1,6 +1,7 @@
 from typing import Any
 from backend.blocks.llm import (
+    DEFAULT_LLM_MODEL,
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
     AIBlockBase,
@@ -49,7 +50,7 @@ class AIConditionBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for evaluating the condition.",
             advanced=False,
         )
@@ -81,7 +82,7 @@ class AIConditionBlock(AIBlockBase):
"condition": "the input is an email address", "condition": "the input is an email address",
"yes_value": "Valid email", "yes_value": "Valid email",
"no_value": "Not an email", "no_value": "Not an email",
"model": LlmModel.GPT4O, "model": DEFAULT_LLM_MODEL,
"credentials": TEST_CREDENTIALS_INPUT, "credentials": TEST_CREDENTIALS_INPUT,
}, },
test_credentials=TEST_CREDENTIALS, test_credentials=TEST_CREDENTIALS,

[File diff suppressed because it is too large]


@@ -92,8 +92,9 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
O1 = "o1" O1 = "o1"
O1_MINI = "o1-mini" O1_MINI = "o1-mini"
# GPT-5 models # GPT-5 models
GPT5 = "gpt-5-2025-08-07" GPT5_2 = "gpt-5.2-2025-12-11"
GPT5_1 = "gpt-5.1-2025-11-13" GPT5_1 = "gpt-5.1-2025-11-13"
GPT5 = "gpt-5-2025-08-07"
GPT5_MINI = "gpt-5-mini-2025-08-07" GPT5_MINI = "gpt-5-mini-2025-08-07"
GPT5_NANO = "gpt-5-nano-2025-08-07" GPT5_NANO = "gpt-5-nano-2025-08-07"
GPT5_CHAT = "gpt-5-chat-latest" GPT5_CHAT = "gpt-5-chat-latest"
@@ -194,8 +195,9 @@ MODEL_METADATA = {
     LlmModel.O1: ModelMetadata("openai", 200000, 100000),  # o1-2024-12-17
     LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536),  # o1-mini-2024-09-12
     # GPT-5 models
-    LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
+    LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
+    LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
@@ -303,6 +305,8 @@ MODEL_METADATA = {
     LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
 }
+
+DEFAULT_LLM_MODEL = LlmModel.GPT5_2
 for model in LlmModel:
     if model not in MODEL_METADATA:
         raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
@@ -790,7 +794,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
         )
@@ -855,7 +859,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
             input_schema=AIStructuredResponseGeneratorBlock.Input,
             output_schema=AIStructuredResponseGeneratorBlock.Output,
             test_input={
-                "model": LlmModel.GPT4O,
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
                 "expected_format": {
                     "key1": "value1",
@@ -1221,7 +1225,7 @@ class AITextGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
         )
@@ -1317,7 +1321,7 @@ class AITextSummarizerBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for summarizing the text.",
         )
         focus: str = SchemaField(
@@ -1534,7 +1538,7 @@ class AIConversationBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for the conversation.",
         )
         credentials: AICredentials = AICredentialsField()
@@ -1572,7 +1576,7 @@ class AIConversationBlock(AIBlockBase):
                     },
                     {"role": "user", "content": "Where was it played?"},
                 ],
-                "model": LlmModel.GPT4O,
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_credentials=TEST_CREDENTIALS,
@@ -1635,7 +1639,7 @@ class AIListGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for generating the list.",
             advanced=True,
         )
@@ -1692,7 +1696,7 @@ class AIListGeneratorBlock(AIBlockBase):
"drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of " "drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
"fictional worlds." "fictional worlds."
), ),
"model": LlmModel.GPT4O, "model": DEFAULT_LLM_MODEL,
"credentials": TEST_CREDENTIALS_INPUT, "credentials": TEST_CREDENTIALS_INPUT,
"max_retries": 3, "max_retries": 3,
"force_json_output": False, "force_json_output": False,


@@ -226,7 +226,7 @@ class SmartDecisionMakerBlock(Block):
         )
         model: llm.LlmModel = SchemaField(
             title="LLM Model",
-            default=llm.LlmModel.GPT4O,
+            default=llm.DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
         )


@@ -196,6 +196,15 @@ class TestXMLParserBlockSecurity:
             async for _ in block.run(XMLParserBlock.Input(input_xml=large_xml)):
                 pass
+
+    async def test_rejects_text_outside_root(self):
+        """Ensure parser surfaces readable errors for invalid root text."""
+        block = XMLParserBlock()
+        invalid_xml = "<root><child>value</child></root> trailing"
+        with pytest.raises(ValueError, match="text outside the root element"):
+            async for _ in block.run(XMLParserBlock.Input(input_xml=invalid_xml)):
+                pass
+
 class TestStoreMediaFileSecurity:
     """Test file storage security limits."""


@@ -28,7 +28,7 @@ class TestLLMStatsTracking:
         response = await llm.llm_call(
             credentials=llm.TEST_CREDENTIALS,
-            llm_model=llm.LlmModel.GPT4O,
+            llm_model=llm.DEFAULT_LLM_MODEL,
             prompt=[{"role": "user", "content": "Hello"}],
             max_tokens=100,
         )
@@ -65,7 +65,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test prompt",
             expected_format={"key1": "desc1", "key2": "desc2"},
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -109,7 +109,7 @@ class TestLLMStatsTracking:
         # Run the block
         input_data = llm.AITextGeneratorBlock.Input(
             prompt="Generate text",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -170,7 +170,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test prompt",
             expected_format={"key1": "desc1", "key2": "desc2"},
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             retry=2,
         )
@@ -228,7 +228,7 @@ class TestLLMStatsTracking:
         input_data = llm.AITextSummarizerBlock.Input(
             text=long_text,
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=100,  # Small chunks
             chunk_overlap=10,
@@ -299,7 +299,7 @@ class TestLLMStatsTracking:
         # Test with very short text (should only need 1 chunk + 1 final summary)
         input_data = llm.AITextSummarizerBlock.Input(
             text="This is a short text.",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=1000,  # Large enough to avoid chunking
         )
@@ -346,7 +346,7 @@ class TestLLMStatsTracking:
{"role": "assistant", "content": "Hi there!"}, {"role": "assistant", "content": "Hi there!"},
{"role": "user", "content": "How are you?"}, {"role": "user", "content": "How are you?"},
], ],
model=llm.LlmModel.GPT4O, model=llm.DEFAULT_LLM_MODEL,
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
) )
@@ -387,7 +387,7 @@ class TestLLMStatsTracking:
         # Run the block
         input_data = llm.AIListGeneratorBlock.Input(
             focus="test items",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_retries=3,
         )
@@ -469,7 +469,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test",
             expected_format={"result": "desc"},
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -513,7 +513,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             style=llm.SummaryStyle.BULLET_POINTS,
         )
@@ -558,7 +558,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             style=llm.SummaryStyle.BULLET_POINTS,
             max_tokens=1000,
@@ -593,7 +593,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -623,7 +623,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=1000,
         )
@@ -654,7 +654,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )


@@ -233,7 +233,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
     # Create test input
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Should I continue with this task?",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -335,7 +335,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         retry=2,  # Set retry to 2 for testing
         agent_mode_max_iterations=0,
@@ -402,7 +402,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -462,7 +462,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -526,7 +526,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -648,7 +648,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Test prompt",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         retry=2,
         agent_mode_max_iterations=0,
@@ -722,7 +722,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     ):
         input_data = SmartDecisionMakerBlock.Input(
             prompt="Simple prompt",
-            model=llm_module.LlmModel.GPT4O,
+            model=llm_module.DEFAULT_LLM_MODEL,
             credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
             agent_mode_max_iterations=0,
         )
@@ -778,7 +778,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     ):
         input_data = SmartDecisionMakerBlock.Input(
             prompt="Another test",
-            model=llm_module.LlmModel.GPT4O,
+            model=llm_module.DEFAULT_LLM_MODEL,
             credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
             agent_mode_max_iterations=0,
         )
@@ -931,7 +931,7 @@ async def test_smart_decision_maker_agent_mode():
     # Test agent mode with max_iterations = 3
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Complete this task using tools",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=3,  # Enable agent mode with 3 max iterations
     )
@@ -1020,7 +1020,7 @@ async def test_smart_decision_maker_traditional_mode_default():
     # Test default behavior (traditional mode)
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Test prompt",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,  # Traditional mode
     )


@@ -373,7 +373,7 @@ async def test_output_yielding_with_dynamic_fields():
     input_data = block.input_schema(
         prompt="Create a user dictionary",
         credentials=llm.TEST_CREDENTIALS_INPUT,
-        model=llm.LlmModel.GPT4O,
+        model=llm.DEFAULT_LLM_MODEL,
         agent_mode_max_iterations=0,  # Use traditional mode to test output yielding
     )
@@ -594,7 +594,7 @@ async def test_validation_errors_dont_pollute_conversation():
     input_data = block.input_schema(
         prompt="Test prompt",
         credentials=llm.TEST_CREDENTIALS_INPUT,
-        model=llm.LlmModel.GPT4O,
+        model=llm.DEFAULT_LLM_MODEL,
         retry=3,  # Allow retries
         agent_mode_max_iterations=1,
     )


@@ -1,5 +1,5 @@
 from gravitasml.parser import Parser
-from gravitasml.token import tokenize
+from gravitasml.token import Token, tokenize
 from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
 from backend.data.model import SchemaField
@@ -25,6 +25,38 @@ class XMLParserBlock(Block):
             ],
         )
+
+    @staticmethod
+    def _validate_tokens(tokens: list[Token]) -> None:
+        """Ensure the XML has a single root element and no stray text."""
+        if not tokens:
+            raise ValueError("XML input is empty.")
+        depth = 0
+        root_seen = False
+        for token in tokens:
+            if token.type == "TAG_OPEN":
+                if depth == 0 and root_seen:
+                    raise ValueError("XML must have a single root element.")
+                depth += 1
+                if depth == 1:
+                    root_seen = True
+            elif token.type == "TAG_CLOSE":
+                depth -= 1
+                if depth < 0:
+                    raise SyntaxError("Unexpected closing tag in XML input.")
+            elif token.type in {"TEXT", "ESCAPE"}:
+                if depth == 0 and token.value:
+                    raise ValueError(
+                        "XML contains text outside the root element; "
+                        "wrap content in a single root tag."
+                    )
+        if depth != 0:
+            raise SyntaxError("Unclosed tag detected in XML input.")
+        if not root_seen:
+            raise ValueError("XML must include a root element.")
+
     async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         # Security fix: Add size limits to prevent XML bomb attacks
         MAX_XML_SIZE = 10 * 1024 * 1024  # 10MB limit for XML input
@@ -35,7 +67,9 @@ class XMLParserBlock(Block):
             )
         try:
-            tokens = tokenize(input_data.input_xml)
+            tokens = list(tokenize(input_data.input_xml))
+            self._validate_tokens(tokens)
             parser = Parser(tokens)
             parsed_result = parser.parse()
             yield "parsed_xml", parsed_result


@@ -111,6 +111,8 @@ class TranscribeYoutubeVideoBlock(Block):
             return parsed_url.path.split("/")[2]
         if parsed_url.path[:3] == "/v/":
             return parsed_url.path.split("/")[2]
+        if parsed_url.path.startswith("/shorts/"):
+            return parsed_url.path.split("/")[2]
         raise ValueError(f"Invalid YouTube URL: {url}")

     def get_transcript(
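For reference, the Shorts handling added above relies on urlparse dropping the query string from the path, so a URL like .../shorts/<id>?feature=share still splits cleanly. A minimal standalone sketch of just that branch:

from urllib.parse import urlparse


def extract_shorts_id(url: str) -> str:
    # "/shorts/dtUqwMu3e-g" -> ["", "shorts", "dtUqwMu3e-g"]
    path = urlparse(url).path
    if path.startswith("/shorts/"):
        return path.split("/")[2]
    raise ValueError(f"Invalid YouTube URL: {url}")


assert extract_shorts_id("https://www.youtube.com/shorts/dtUqwMu3e-g?feature=share") == "dtUqwMu3e-g"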


@@ -59,12 +59,13 @@ from backend.integrations.credentials_store import (
 MODEL_COST: dict[LlmModel, int] = {
     LlmModel.O3: 4,
-    LlmModel.O3_MINI: 2,  # $1.10 / $4.40
-    LlmModel.O1: 16,  # $15 / $60
+    LlmModel.O3_MINI: 2,
+    LlmModel.O1: 16,
     LlmModel.O1_MINI: 4,
     # GPT-5 models
-    LlmModel.GPT5: 2,
+    LlmModel.GPT5_2: 6,
     LlmModel.GPT5_1: 5,
+    LlmModel.GPT5: 2,
     LlmModel.GPT5_MINI: 1,
     LlmModel.GPT5_NANO: 1,
     LlmModel.GPT5_CHAT: 5,
@@ -87,7 +88,7 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.AIML_API_LLAMA3_3_70B: 1,
     LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
     LlmModel.AIML_API_LLAMA_3_2_3B: 1,
-    LlmModel.LLAMA3_3_70B: 1,  # $0.59 / $0.79
+    LlmModel.LLAMA3_3_70B: 1,
     LlmModel.LLAMA3_1_8B: 1,
     LlmModel.OLLAMA_LLAMA3_3: 1,
     LlmModel.OLLAMA_LLAMA3_2: 1,


@@ -1906,16 +1906,32 @@ httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
pydantic = ">=1.10,<3" pydantic = ">=1.10,<3"
pyjwt = ">=2.10.1,<3.0.0" pyjwt = ">=2.10.1,<3.0.0"
[[package]]
name = "gravitas-md2gdocs"
version = "0.1.0"
description = "Convert Markdown to Google Docs API requests"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "gravitas_md2gdocs-0.1.0-py3-none-any.whl", hash = "sha256:0cb0627779fdd65c1604818af4142eea1b25d055060183363de1bae4d9e46508"},
{file = "gravitas_md2gdocs-0.1.0.tar.gz", hash = "sha256:bb3122fe9fa35c528f3f00b785d3f1398d350082d5d03f60f56c895bdcc68033"},
]
[package.extras]
dev = ["google-auth-oauthlib (>=1.0.0)", "pytest (>=7.0.0)", "pytest-cov (>=4.0.0)", "python-dotenv (>=1.0.0)", "ruff (>=0.1.0)"]
google = ["google-api-python-client (>=2.0.0)", "google-auth (>=2.0.0)"]
[[package]] [[package]]
name = "gravitasml" name = "gravitasml"
version = "0.1.3" version = "0.1.4"
description = "" description = ""
optional = false optional = false
python-versions = "<4.0,>=3.10" python-versions = "<4.0,>=3.10"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "gravitasml-0.1.3-py3-none-any.whl", hash = "sha256:51ff98b4564b7a61f7796f18d5f2558b919d30b3722579296089645b7bc18b85"}, {file = "gravitasml-0.1.4-py3-none-any.whl", hash = "sha256:671a18b11d3d8a0e270c6a80c72cd058458b18d5ef7560d00010e962ab1bca74"},
{file = "gravitasml-0.1.3.tar.gz", hash = "sha256:04d240b9fa35878252d57a36032130b6516487468847fcdced1022c032a20f57"}, {file = "gravitasml-0.1.4.tar.gz", hash = "sha256:35d0d9fec7431817482d53d9c976e375557c3e041d1eb6928e809324a8c866e3"},
] ]
[package.dependencies] [package.dependencies]
@@ -7279,4 +7295,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "13b191b2a1989d3321ff713c66ff6f5f4f3b82d15df4d407e0e5dbf87d7522c4"
+content-hash = "a93ba0cea3b465cb6ec3e3f258b383b09f84ea352ccfdbfa112902cde5653fc6"


@@ -27,7 +27,7 @@ google-api-python-client = "^2.177.0"
google-auth-oauthlib = "^1.2.2" google-auth-oauthlib = "^1.2.2"
google-cloud-storage = "^3.2.0" google-cloud-storage = "^3.2.0"
googlemaps = "^4.10.0" googlemaps = "^4.10.0"
gravitasml = "^0.1.3" gravitasml = "^0.1.4"
groq = "^0.30.0" groq = "^0.30.0"
html2text = "^2024.2.26" html2text = "^2024.2.26"
jinja2 = "^3.1.6" jinja2 = "^3.1.6"
@@ -82,6 +82,7 @@ firecrawl-py = "^4.3.6"
exa-py = "^1.14.20" exa-py = "^1.14.20"
croniter = "^6.0.0" croniter = "^6.0.0"
stagehand = "^0.5.1" stagehand = "^0.5.1"
gravitas-md2gdocs = "^0.1.0"
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.6.1" aiohappyeyeballs = "^2.6.1"


@@ -0,0 +1,113 @@
+from unittest.mock import Mock
+
+from backend.blocks.google.docs import GoogleDocsFormatTextBlock
+
+
+def _make_mock_docs_service() -> Mock:
+    service = Mock()
+    # Ensure chained call exists: service.documents().batchUpdate(...).execute()
+    service.documents.return_value.batchUpdate.return_value.execute.return_value = {}
+    return service
+
+
+def test_format_text_parses_shorthand_hex_color():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=False,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#FFF",
+    )
+
+    assert result["success"] is True
+    # Verify request body contains correct rgbColor for white.
+    _, kwargs = service.documents.return_value.batchUpdate.call_args
+    requests = kwargs["body"]["requests"]
+    rgb = requests[0]["updateTextStyle"]["textStyle"]["foregroundColor"]["color"][
+        "rgbColor"
+    ]
+    assert rgb == {"red": 1.0, "green": 1.0, "blue": 1.0}
+
+
+def test_format_text_parses_full_hex_color():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=False,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#FF0000",
+    )
+
+    assert result["success"] is True
+    _, kwargs = service.documents.return_value.batchUpdate.call_args
+    requests = kwargs["body"]["requests"]
+    rgb = requests[0]["updateTextStyle"]["textStyle"]["foregroundColor"]["color"][
+        "rgbColor"
+    ]
+    assert rgb == {"red": 1.0, "green": 0.0, "blue": 0.0}
+
+
+def test_format_text_ignores_invalid_color_when_other_fields_present():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=True,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#GGG",
+    )
+
+    assert result["success"] is True
+    assert "warning" in result
+    # Should still apply bold, but should NOT include foregroundColor in textStyle.
+    _, kwargs = service.documents.return_value.batchUpdate.call_args
+    requests = kwargs["body"]["requests"]
+    text_style = requests[0]["updateTextStyle"]["textStyle"]
+    fields = requests[0]["updateTextStyle"]["fields"]
+    assert text_style == {"bold": True}
+    assert fields == "bold"
+
+
+def test_format_text_invalid_color_only_does_not_call_api():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=False,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#F",
+    )
+
+    assert result["success"] is False
+    assert "Invalid foreground_color" in result["message"]
+    service.documents.return_value.batchUpdate.assert_not_called()
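These tests pin down the color handling without showing the block itself: #RGB shorthand expands to #RRGGBB, valid colors become Docs-style rgbColor floats, and invalid colors are either skipped (when other formatting is requested) or fail the call outright. A rough sketch of a parser with that contract; the helper name is made up here, and only the expected outputs come from the tests:

def parse_hex_color(value: str) -> dict[str, float] | None:
    """Return a Docs-style rgbColor dict for #RGB / #RRGGBB input, else None."""
    hex_part = value.lstrip("#")
    if len(hex_part) == 3:
        hex_part = "".join(c * 2 for c in hex_part)  # "FFF" -> "FFFFFF"
    if len(hex_part) != 6:
        return None  # e.g. "#F": wrong length
    try:
        red, green, blue = (int(hex_part[i : i + 2], 16) / 255.0 for i in (0, 2, 4))
    except ValueError:
        return None  # e.g. "#GGG": non-hex characters
    return {"red": red, "green": green, "blue": blue}


assert parse_hex_color("#FFF") == {"red": 1.0, "green": 1.0, "blue": 1.0}
assert parse_hex_color("#FF0000") == {"red": 1.0, "green": 0.0, "blue": 0.0}
assert parse_hex_color("#GGG") is None
assert parse_hex_color("#F") is None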


@@ -37,6 +37,18 @@ class TestTranscribeYoutubeVideoBlock:
         video_id = self.youtube_block.extract_video_id(url)
         assert video_id == "dQw4w9WgXcQ"
+
+    def test_extract_video_id_shorts_url(self):
+        """Test extracting video ID from YouTube Shorts URL."""
+        url = "https://www.youtube.com/shorts/dtUqwMu3e-g"
+        video_id = self.youtube_block.extract_video_id(url)
+        assert video_id == "dtUqwMu3e-g"
+
+    def test_extract_video_id_shorts_url_with_params(self):
+        """Test extracting video ID from YouTube Shorts URL with query parameters."""
+        url = "https://www.youtube.com/shorts/dtUqwMu3e-g?feature=share"
+        video_id = self.youtube_block.extract_video_id(url)
+        assert video_id == "dtUqwMu3e-g"
+
     @patch("backend.blocks.youtube.YouTubeTranscriptApi")
     def test_get_transcript_english_available(self, mock_api_class):
         """Test getting transcript when English is available."""


@@ -64,7 +64,9 @@ export const useAnyOfField = (
   const [selectedType, setSelectedType] = useState<string>(initialSelectedType);

-  const isEnabled = formData !== null && formData !== undefined;
+  // Only check for explicit null (set by toggle off), not undefined (empty input)
+  // This allows users to clear number inputs without the field disappearing
+  const isEnabled = formData !== null;

   const handleTypeChange = (t: string) => {
     setSelectedType(t);
@@ -79,7 +81,13 @@ export const useAnyOfField = (
     }
   };

-  const handleValueChange = (value: any) => onChange(value);
+  const handleValueChange = (value: any) => {
+    if (isNullableType && value === null) {
+      onChange(undefined);
+      return;
+    }
+    onChange(value);
+  };

   const currentTypeOption = typeOptions.find((o) => o.type === selectedType);


@@ -51,7 +51,7 @@ export const TextInputWidget = (props: WidgetProps) => {
     handleChange: (v: string) => (v === "" ? undefined : Number(v)),
   },
   [InputType.INTEGER]: {
-    htmlType: "account",
+    htmlType: "number",
     placeholder: "Enter integer value...",
     handleChange: (v: string) => (v === "" ? undefined : Number(v)),
   },