fix(backend): update dry-run tests for platform key + fix falsy value filter

- Mock `_get_platform_openrouter_key` in `test_prepare_dry_run_orchestrator_block`
  so the test doesn't depend on a real OpenRouter key being present in CI.
  Also fix incorrect assertion that model is preserved (it's overridden to
  the simulation model).

- Fix output filter in `simulate_block` that incorrectly dropped valid falsy
  values like `False`, `0`, and `[]`. Now only `None` and empty strings are
  skipped.

- Add `test_generic_block_preserves_falsy_values` test to cover the fix.

This commit is contained in:
Zamil Majdy
2026-04-02 07:52:09 +02:00
parent 9f2257daaa
commit a71396ee48
3 changed files with 45 additions and 8 deletions

View File

@@ -487,17 +487,22 @@ async def test_execute_block_dry_run_simulator_error_returns_error_response():
def test_prepare_dry_run_orchestrator_block():
    """prepare_dry_run caps iterations and overrides model to simulation model."""
    from backend.blocks.orchestrator import OrchestratorBlock

    block = OrchestratorBlock()
    input_data = {"prompt": "hello", "model": "gpt-4o", "agent_mode_max_iterations": 10}
    # Mock the platform key lookup so this test does not depend on a real
    # OpenRouter key being configured in the environment (e.g. CI).
    with patch(
        "backend.executor.simulator._get_platform_openrouter_key",
        return_value="sk-or-test-key",
    ):
        result = prepare_dry_run(block, input_data)
    assert result is not None
    # Model is overridden to the simulation model (not the user's model).
    assert result["model"] != "gpt-4o"
    # Iteration count is capped to a single dry-run pass.
    assert result["agent_mode_max_iterations"] == 1
    assert result["_dry_run_api_key"] == "sk-or-test-key"
    # Original input_data should not be mutated.
    assert input_data["model"] == "gpt-4o"

View File

@@ -431,10 +431,15 @@ async def simulate_block(
try:
parsed = await _call_llm_for_simulation(system_prompt, user_prompt, label=label)
# Yield only pins that have meaningful values
# Yield only pins present in the LLM response with meaningful values.
# We skip None and empty strings but preserve valid falsy values
# like False, 0, and [].
for pin_name in output_properties:
value = parsed.get(pin_name)
if value is not None and value != "":
yield pin_name, value
if pin_name not in parsed:
continue
value = parsed[pin_name]
if value is None or value == "":
continue
yield pin_name, value
except (RuntimeError, ValueError) as e:
yield "error", str(e)

View File

@@ -426,6 +426,33 @@ class TestSimulateBlockPassthrough:
# Missing pins are omitted — only meaningful values are yielded
assert "error" not in outputs
@pytest.mark.asyncio
async def test_generic_block_preserves_falsy_values(self) -> None:
    """Valid falsy values like False, 0, and [] must be yielded, not dropped."""
    block = _make_block(
        output_schema={
            "properties": {
                "flag": {"type": "boolean"},
                "count": {"type": "integer"},
                "items": {"type": "array"},
            },
            "required": ["flag", "count", "items"],
        }
    )
    # The simulated LLM returns only falsy (but valid) values; none of
    # them may be filtered out by the output filter.
    with patch(
        "backend.executor.simulator._call_llm_for_simulation",
        new_callable=AsyncMock,
        return_value={"flag": False, "count": 0, "items": []},
    ):
        outputs: dict[str, Any] = {}
        async for name, data in simulate_block(block, {"query": "test"}):
            outputs[name] = data
    assert outputs["flag"] is False
    assert outputs["count"] == 0
    assert outputs["items"] == []
@pytest.mark.asyncio
async def test_llm_failure_yields_error(self) -> None:
"""When LLM fails, should yield an error tuple."""