style(backend): Fix formatting (ruff + black + isort)

Author: Nick Tindle
Date: 2026-02-12 16:10:55 -06:00
parent bacbf1f0ab
commit e8ea6c537b
25 changed files with 309 additions and 304 deletions
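The bulk of this diff applies one mechanical change: long `assert` messages that the older black style wrapped by parenthesizing the condition are rewritten in the newer ruff-format style, which keeps the condition on the `assert` line and parenthesizes the message instead. The same move is applied to long right-hand sides of subscript assignments. A minimal sketch of the before/after shape, using hypothetical values rather than code from the repository:

```python
# Illustrative only: hypothetical values, not taken from the changed files.
expected_error = "invalid input"
error_text = '{"detail": "invalid input"}'

# Old style (black): the condition is wrapped in parentheses and the
# message trails after the closing parenthesis.
assert (
    expected_error in error_text
), f"Expected '{expected_error}' in error response: {error_text}"

# New style (ruff format): the condition stays on the assert line and the
# long message is wrapped in parentheses instead.
assert expected_error in error_text, (
    f"Expected '{expected_error}' in error response: {error_text}"
)

# The same idea applied to a long right-hand side of a subscript assignment:
tool_call = {"function": {}}
new_name = "lookup_user"
tool_call["function"]["name"] = (
    new_name  # parenthesize the value instead of splitting the subscript
)
```

Both styles are semantically identical; the hunks below only change how the lines are wrapped.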


@@ -154,9 +154,9 @@ def test_log_raw_metric_validation_errors(
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
error_text = json.dumps(error_detail)
assert (
expected_error in error_text
), f"Expected '{expected_error}' in error response: {error_text}"
assert expected_error in error_text, (
f"Expected '{expected_error}' in error response: {error_text}"
)
def test_log_raw_metric_service_error(
@@ -310,9 +310,9 @@ def test_log_raw_analytics_validation_errors(
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
error_text = json.dumps(error_detail)
assert (
expected_error in error_text
), f"Expected '{expected_error}' in error response: {error_text}"
assert expected_error in error_text, (
f"Expected '{expected_error}' in error response: {error_text}"
)
def test_log_raw_analytics_service_error(


@@ -96,9 +96,9 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
)
assert s2 is not None, "Session not found after loading from DB"
assert len(s2.messages) == len(
s.messages
), f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}"
assert len(s2.messages) == len(s.messages), (
f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}"
)
# Verify all roles are present
roles = [m.role for m in s2.messages]
@@ -109,11 +109,11 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
# Verify message content
for orig, loaded in zip(s.messages, s2.messages):
assert orig.role == loaded.role, f"Role mismatch: {orig.role} != {loaded.role}"
assert (
orig.content == loaded.content
), f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}"
assert orig.content == loaded.content, (
f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}"
)
if orig.tool_calls:
assert (
loaded.tool_calls is not None
), f"Tool calls missing for {orig.role} message"
assert loaded.tool_calls is not None, (
f"Tool calls missing for {orig.role} message"
)
assert len(orig.tool_calls) == len(loaded.tool_calls)


@@ -1178,13 +1178,13 @@ async def _stream_chat_chunks(
tool_calls[idx]["id"] = tc_chunk.id
if tc_chunk.function:
if tc_chunk.function.name:
tool_calls[idx]["function"][
"name"
] = tc_chunk.function.name
tool_calls[idx]["function"]["name"] = (
tc_chunk.function.name
)
if tc_chunk.function.arguments:
tool_calls[idx]["function"][
"arguments"
] += tc_chunk.function.arguments
tool_calls[idx]["function"]["arguments"] += (
tc_chunk.function.arguments
)
# Emit StreamToolInputStart only after we have the tool call ID
if (


@@ -152,9 +152,7 @@ async def test_add_agent_to_library(mocker):
# Mock graph_db.get_graph function that's called to check for HITL blocks
mock_graph_db = mocker.patch("backend.api.features.library.db.graph_db")
mock_graph_model = mocker.Mock()
mock_graph_model.nodes = (
[]
) # Empty list so _has_human_in_the_loop_blocks returns False
mock_graph_model.nodes = [] # Empty list so _has_human_in_the_loop_blocks returns False
mock_graph_db.get_graph = mocker.AsyncMock(return_value=mock_graph_model)
# Mock the model conversion


@@ -102,12 +102,12 @@ def assert_mock_called_with_partial(mock_obj: Any, **expected_kwargs: Any) -> No
actual_kwargs = mock_obj.call_args.kwargs if mock_obj.call_args else {}
for key, expected_value in expected_kwargs.items():
assert (
key in actual_kwargs
), f"Missing key '{key}' in mock call. Actual keys: {list(actual_kwargs.keys())}"
assert (
actual_kwargs[key] == expected_value
), f"Mock called with {key}={actual_kwargs[key]}, expected {expected_value}"
assert key in actual_kwargs, (
f"Missing key '{key}' in mock call. Actual keys: {list(actual_kwargs.keys())}"
)
assert actual_kwargs[key] == expected_value, (
f"Mock called with {key}={actual_kwargs[key]}, expected {expected_value}"
)
@contextmanager


@@ -556,9 +556,9 @@ async def create_table(
) -> dict:
for field in table_fields:
assert field.get("name"), "Field name is required"
assert (
field.get("type") in TABLE_FIELD_TYPES
), f"Field type {field.get('type')} is not valid. Valid types are {TABLE_FIELD_TYPES}."
assert field.get("type") in TABLE_FIELD_TYPES, (
f"Field type {field.get('type')} is not valid. Valid types are {TABLE_FIELD_TYPES}."
)
# Note fields have differnet options for different types we are not currently validating them
response = await Requests().post(
@@ -582,9 +582,9 @@ async def update_table(
date_dependency: dict | None = None,
) -> dict:
assert (
table_name or table_description or date_dependency
), "At least one of table_name, table_description, or date_dependency must be provided"
assert table_name or table_description or date_dependency, (
"At least one of table_name, table_description, or date_dependency must be provided"
)
params: dict[str, str | dict[str, str]] = {}
if table_name:
@@ -613,9 +613,9 @@ async def create_field(
options: dict[str, str] | None = None,
) -> dict[str, str | dict[str, str]]:
assert (
field_type in TABLE_FIELD_TYPES
), f"Field type {field_type} is not valid. Valid types are {TABLE_FIELD_TYPES}."
assert field_type in TABLE_FIELD_TYPES, (
f"Field type {field_type} is not valid. Valid types are {TABLE_FIELD_TYPES}."
)
params: dict[str, str | dict[str, str]] = {}
params["type"] = field_type
params["name"] = name
@@ -928,9 +928,9 @@ async def update_record(
typecast: bool | None = None,
fields: dict[str, Any] | None = None,
) -> dict[str, dict[str, dict[str, str]]]:
params: dict[str, str | bool | dict[str, Any] | list[dict[str, dict[str, str]]]] = (
{}
)
params: dict[
str, str | bool | dict[str, Any] | list[dict[str, dict[str, str]]]
] = {}
if return_fields_by_field_id:
params["returnFieldsByFieldId"] = return_fields_by_field_id
if typecast:
@@ -958,9 +958,9 @@ async def create_record(
assert fields or records, "At least one of fields or records must be provided"
assert not (fields and records), "Only one of fields or records can be provided"
if records is not None:
assert (
len(records) <= 10
), "Only up to 10 records can be provided when using records"
assert len(records) <= 10, (
"Only up to 10 records can be provided when using records"
)
params: dict[str, str | bool | dict[str, Any] | list[dict[str, Any]]] = {}
if fields:


@@ -43,9 +43,9 @@ async def test_create_update_table():
workspace_id = "wsphuHmfllg7V3Brd"
response = await create_base(credentials, workspace_id, "API Testing Base")
assert response is not None, f"Checking create base response: {response}"
assert (
response.get("id") is not None
), f"Checking create base response id: {response}"
assert response.get("id") is not None, (
f"Checking create base response id: {response}"
)
base_id = response.get("id")
assert base_id is not None, f"Checking create base response id: {base_id}"
@@ -236,9 +236,9 @@ async def test_record_management():
updated_records = response.get("records")
assert updated_records is not None
assert len(updated_records) == 2, f"Updated records: {updated_records}"
assert isinstance(
updated_records, list
), f"Type of updated records: {type(updated_records)}"
assert isinstance(updated_records, list), (
f"Type of updated records: {type(updated_records)}"
)
first_updated = updated_records[0] # type: ignore
second_updated = updated_records[1] # type: ignore
first_updated_fields = first_updated.get("fields")
@@ -257,9 +257,9 @@ async def test_record_management():
deleted_records = response.get("records")
assert deleted_records is not None
assert len(deleted_records) == 2, f"Deleted records: {deleted_records}"
assert isinstance(
deleted_records, list
), f"Type of deleted records: {type(deleted_records)}"
assert isinstance(deleted_records, list), (
f"Type of deleted records: {type(deleted_records)}"
)
first_deleted = deleted_records[0] # type: ignore
second_deleted = deleted_records[1] # type: ignore
assert first_deleted.get("deleted")
@@ -293,12 +293,12 @@ async def test_webhook_management():
)
response = await create_webhook(credentials, base_id, webhook_specification)
assert response is not None, f"Checking create webhook response: {response}"
assert (
response.get("id") is not None
), f"Checking create webhook response id: {response}"
assert (
response.get("macSecretBase64") is not None
), f"Checking create webhook response macSecretBase64: {response}"
assert response.get("id") is not None, (
f"Checking create webhook response id: {response}"
)
assert response.get("macSecretBase64") is not None, (
f"Checking create webhook response macSecretBase64: {response}"
)
webhook_id = response.get("id")
assert webhook_id is not None, f"Webhook ID: {webhook_id}"
@@ -308,14 +308,14 @@ async def test_webhook_management():
credentials, base_id, table_id, fields={"test_field": "test_value"}
)
assert response is not None, f"Checking create record response: {response}"
assert (
response.get("id") is not None
), f"Checking create record response id: {response}"
assert response.get("id") is not None, (
f"Checking create record response id: {response}"
)
fields = response.get("fields")
assert fields is not None, f"Checking create record response fields: {response}"
assert (
fields.get("test_field") == "test_value"
), f"Checking create record response fields test_field: {response}"
assert fields.get("test_field") == "test_value", (
f"Checking create record response fields test_field: {response}"
)
response = await list_webhook_payloads(credentials, base_id, webhook_id)
assert response is not None, f"Checking list webhook payloads response: {response}"


@@ -1267,7 +1267,9 @@ class ExaWebsetSummaryBlock(Block):
(
e.format.value
if e.format and hasattr(e.format, "value")
else str(e.format) if e.format else "text"
else str(e.format)
if e.format
else "text"
)
for e in enrichments
)


@@ -97,9 +97,9 @@ class StagehandRecommendedLlmModel(str, Enum):
if len(model_name.split("/")) == 1 and not self.value.startswith(
model_metadata.provider
):
assert (
model_metadata.provider != "open_router"
), "Logic failed and open_router provider attempted to be prepended to model name! in stagehand/_config.py"
assert model_metadata.provider != "open_router", (
"Logic failed and open_router provider attempted to be prepended to model name! in stagehand/_config.py"
)
model_name = f"{model_metadata.provider}/{model_name}"
logger.error(f"Model name: {model_name}")


@@ -128,9 +128,9 @@ async def test_block_ids_valid(block: Type[Block]):
try:
parsed_uuid = uuid.UUID(block_instance.id)
# Verify it's specifically UUID version 4
assert (
parsed_uuid.version == 4
), f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
assert parsed_uuid.version == 4, (
f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
)
except ValueError:
pytest.fail(f"Block {block.name} has invalid UUID format: {block_instance.id}")


@@ -174,9 +174,9 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
)
assert tool_functions is not None, "Tool functions should not be None"
assert (
len(tool_functions) == 2
), f"Expected 2 tool functions, got {len(tool_functions)}"
assert len(tool_functions) == 2, (
f"Expected 2 tool functions, got {len(tool_functions)}"
)
# Check the first tool function (testgraph)
assert tool_functions[0]["type"] == "function"


@@ -670,6 +670,6 @@ async def test_validation_errors_dont_pollute_conversation():
if msg.get("role") == "user"
and "parameter errors" in msg.get("content", "")
]
assert (
len(error_messages) == 0
), "Validation error leaked into final conversation"
assert len(error_messages) == 0, (
"Validation error leaked into final conversation"
)


@@ -107,15 +107,15 @@ async def test_ceiling_balance_clamps_when_would_exceed(server: SpinTestServer):
)
# Balance should be clamped to ceiling
assert (
final_balance == 1000
), f"Balance should be clamped to 1000, got {final_balance}"
assert final_balance == 1000, (
f"Balance should be clamped to 1000, got {final_balance}"
)
# Verify with get_credits too
stored_balance = await credit_system.get_credits(user_id)
assert (
stored_balance == 1000
), f"Stored balance should be 1000, got {stored_balance}"
assert stored_balance == 1000, (
f"Stored balance should be 1000, got {stored_balance}"
)
# Verify transaction shows the clamped amount
transactions = await CreditTransaction.prisma().find_many(
@@ -164,9 +164,9 @@ async def test_ceiling_balance_allows_when_under_threshold(server: SpinTestServe
# Verify with get_credits too
stored_balance = await credit_system.get_credits(user_id)
assert (
stored_balance == 500
), f"Stored balance should be 500, got {stored_balance}"
assert stored_balance == 500, (
f"Stored balance should be 500, got {stored_balance}"
)
finally:
await cleanup_test_user(user_id)


@@ -108,9 +108,9 @@ async def test_concurrent_spends_same_user(server: SpinTestServer):
transactions = await CreditTransaction.prisma().find_many(
where={"userId": user_id, "type": prisma.enums.CreditTransactionType.USAGE}
)
assert (
len(transactions) == 10
), f"Expected 10 transactions, got {len(transactions)}"
assert len(transactions) == 10, (
f"Expected 10 transactions, got {len(transactions)}"
)
finally:
await cleanup_test_user(user_id)
@@ -321,9 +321,9 @@ async def test_onboarding_reward_idempotency(server: SpinTestServer):
"transactionKey": f"REWARD-{user_id}-WELCOME",
}
)
assert (
len(transactions) == 1
), f"Expected 1 reward transaction, got {len(transactions)}"
assert len(transactions) == 1, (
f"Expected 1 reward transaction, got {len(transactions)}"
)
finally:
await cleanup_test_user(user_id)
@@ -358,9 +358,9 @@ async def test_integer_overflow_protection(server: SpinTestServer):
# Balance should be clamped to max_int, not overflowed
final_balance = await credit_system.get_credits(user_id)
assert (
final_balance == max_int
), f"Balance should be clamped to {max_int}, got {final_balance}"
assert final_balance == max_int, (
f"Balance should be clamped to {max_int}, got {final_balance}"
)
# Verify transaction was created with clamped amount
transactions = await CreditTransaction.prisma().find_many(
@@ -371,9 +371,9 @@ async def test_integer_overflow_protection(server: SpinTestServer):
order={"createdAt": "desc"},
)
assert len(transactions) > 0, "Transaction should be created"
assert (
transactions[0].runningBalance == max_int
), "Transaction should show clamped balance"
assert transactions[0].runningBalance == max_int, (
"Transaction should show clamped balance"
)
finally:
await cleanup_test_user(user_id)
@@ -432,9 +432,9 @@ async def test_high_concurrency_stress(server: SpinTestServer):
# Verify final balance
final_balance = await credit_system.get_credits(user_id)
assert (
final_balance == expected_balance
), f"Expected {expected_balance}, got {final_balance}"
assert final_balance == expected_balance, (
f"Expected {expected_balance}, got {final_balance}"
)
assert final_balance >= 0, "Balance went negative!"
finally:
@@ -533,9 +533,9 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer
print(f"Successful: {len(successful)}, Failed: {len(failed)}")
# All should succeed since 150 - (10 + 20 + 30) = 90 > 0
assert (
len(successful) == 3
), f"Expected all 3 to succeed, got {len(successful)} successes: {results}"
assert len(successful) == 3, (
f"Expected all 3 to succeed, got {len(successful)} successes: {results}"
)
assert final_balance == 90, f"Expected balance 90, got {final_balance}"
# Check transaction timestamps to confirm database-level serialization
@@ -575,38 +575,38 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer
# Verify all balances are valid intermediate states
for balance in actual_balances:
assert (
balance in expected_possible_balances
), f"Invalid balance {balance}, expected one of {expected_possible_balances}"
assert balance in expected_possible_balances, (
f"Invalid balance {balance}, expected one of {expected_possible_balances}"
)
# Final balance should always be 90 (150 - 60)
assert (
min(actual_balances) == 90
), f"Final balance should be 90, got {min(actual_balances)}"
assert min(actual_balances) == 90, (
f"Final balance should be 90, got {min(actual_balances)}"
)
# The final transaction should always have balance 90
# The other transactions should have valid intermediate balances
assert (
90 in actual_balances
), f"Final balance 90 should be in actual_balances: {actual_balances}"
assert 90 in actual_balances, (
f"Final balance 90 should be in actual_balances: {actual_balances}"
)
# All balances should be >= 90 (the final state)
assert all(
balance >= 90 for balance in actual_balances
), f"All balances should be >= 90, got {actual_balances}"
assert all(balance >= 90 for balance in actual_balances), (
f"All balances should be >= 90, got {actual_balances}"
)
# CRITICAL: Transactions are atomic but can complete in any order
# What matters is that all running balances are valid intermediate states
# Each balance should be between 90 (final) and 140 (after first transaction)
for balance in actual_balances:
assert (
90 <= balance <= 140
), f"Balance {balance} is outside valid range [90, 140]"
assert 90 <= balance <= 140, (
f"Balance {balance} is outside valid range [90, 140]"
)
# Final balance (minimum) should always be 90
assert (
min(actual_balances) == 90
), f"Final balance should be 90, got {min(actual_balances)}"
assert min(actual_balances) == 90, (
f"Final balance should be 90, got {min(actual_balances)}"
)
finally:
await cleanup_test_user(user_id)
@@ -722,9 +722,9 @@ async def test_prove_database_locking_behavior(server: SpinTestServer):
print(f"\n💰 Final balance: {final_balance}")
if len(successful) == 3:
assert (
final_balance == 0
), f"If all succeeded, balance should be 0, got {final_balance}"
assert final_balance == 0, (
f"If all succeeded, balance should be 0, got {final_balance}"
)
print(
"✅ CONCLUSION: Database row locking causes requests to WAIT and execute serially"
)


@@ -109,9 +109,9 @@ async def test_deduct_credits_atomic(server: SpinTestServer):
where={"userId": REFUND_TEST_USER_ID}
)
assert user_balance is not None
assert (
user_balance.balance == 500
), f"Expected balance 500, got {user_balance.balance}"
assert user_balance.balance == 500, (
f"Expected balance 500, got {user_balance.balance}"
)
# Verify refund transaction was created
refund_tx = await CreditTransaction.prisma().find_first(
@@ -205,9 +205,9 @@ async def test_handle_dispute_with_sufficient_balance(
where={"userId": REFUND_TEST_USER_ID}
)
assert user_balance is not None
assert (
user_balance.balance == 1000
), f"Balance should remain 1000, got {user_balance.balance}"
assert user_balance.balance == 1000, (
f"Balance should remain 1000, got {user_balance.balance}"
)
finally:
await cleanup_test_user()
@@ -332,9 +332,9 @@ async def test_concurrent_refunds(server: SpinTestServer):
print(f"DEBUG: Final balance = {user_balance.balance}, expected = 500")
# With atomic implementation, all 5 refunds should process correctly
assert (
user_balance.balance == 500
), f"Expected balance 500 after 5 refunds of 100 each, got {user_balance.balance}"
assert user_balance.balance == 500, (
f"Expected balance 500 after 5 refunds of 100 each, got {user_balance.balance}"
)
# Verify all refund transactions exist
refund_txs = await CreditTransaction.prisma().find_many(
@@ -343,9 +343,9 @@ async def test_concurrent_refunds(server: SpinTestServer):
"type": CreditTransactionType.REFUND,
}
)
assert (
len(refund_txs) == 5
), f"Expected 5 refund transactions, got {len(refund_txs)}"
assert len(refund_txs) == 5, (
f"Expected 5 refund transactions, got {len(refund_txs)}"
)
running_balances: set[int] = {
tx.runningBalance for tx in refund_txs if tx.runningBalance is not None
@@ -353,20 +353,20 @@ async def test_concurrent_refunds(server: SpinTestServer):
# Verify all balances are valid intermediate states
for balance in running_balances:
assert (
500 <= balance <= 1000
), f"Invalid balance {balance}, should be between 500 and 1000"
assert 500 <= balance <= 1000, (
f"Invalid balance {balance}, should be between 500 and 1000"
)
# Final balance should be present
assert (
500 in running_balances
), f"Final balance 500 should be in {running_balances}"
assert 500 in running_balances, (
f"Final balance 500 should be in {running_balances}"
)
# All balances should be unique and form a valid sequence
sorted_balances = sorted(running_balances, reverse=True)
assert (
len(sorted_balances) == 5
), f"Expected 5 unique balances, got {len(sorted_balances)}"
assert len(sorted_balances) == 5, (
f"Expected 5 unique balances, got {len(sorted_balances)}"
)
finally:
await cleanup_test_user()


@@ -82,9 +82,7 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer):
# Test 2: Apply amount that should cause underflow
print("\n=== Test 2: Testing underflow protection ===")
test_amount = (
-200
) # This should cause underflow: (POSTGRES_INT_MIN + 100) + (-200) = POSTGRES_INT_MIN - 100
test_amount = -200 # This should cause underflow: (POSTGRES_INT_MIN + 100) + (-200) = POSTGRES_INT_MIN - 100
expected_without_protection = current_balance + test_amount
print(f"Current balance: {current_balance}")
print(f"Test amount: {test_amount}")
@@ -101,9 +99,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer):
print(f"Actual result: {balance_result}")
# Check if underflow protection worked
assert (
balance_result == POSTGRES_INT_MIN
), f"Expected underflow protection to clamp balance to {POSTGRES_INT_MIN}, got {balance_result}"
assert balance_result == POSTGRES_INT_MIN, (
f"Expected underflow protection to clamp balance to {POSTGRES_INT_MIN}, got {balance_result}"
)
# Test 3: Edge case - exactly at POSTGRES_INT_MIN
print("\n=== Test 3: Testing exact POSTGRES_INT_MIN boundary ===")
@@ -128,9 +126,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer):
)
print(f"After subtracting 1: {edge_result}")
assert (
edge_result == POSTGRES_INT_MIN
), f"Expected balance to remain clamped at {POSTGRES_INT_MIN}, got {edge_result}"
assert edge_result == POSTGRES_INT_MIN, (
f"Expected balance to remain clamped at {POSTGRES_INT_MIN}, got {edge_result}"
)
finally:
await cleanup_test_user(user_id)
@@ -176,18 +174,18 @@ async def test_underflow_protection_large_refunds(server: SpinTestServer):
)
# Balance should be clamped to POSTGRES_INT_MIN, not the calculated underflow value
assert (
final_balance == POSTGRES_INT_MIN
), f"Balance should be clamped to {POSTGRES_INT_MIN}, got {final_balance}"
assert (
final_balance > expected_without_protection
), f"Balance should be greater than underflow result {expected_without_protection}, got {final_balance}"
assert final_balance == POSTGRES_INT_MIN, (
f"Balance should be clamped to {POSTGRES_INT_MIN}, got {final_balance}"
)
assert final_balance > expected_without_protection, (
f"Balance should be greater than underflow result {expected_without_protection}, got {final_balance}"
)
# Verify with get_credits too
stored_balance = await credit_system.get_credits(user_id)
assert (
stored_balance == POSTGRES_INT_MIN
), f"Stored balance should be {POSTGRES_INT_MIN}, got {stored_balance}"
assert stored_balance == POSTGRES_INT_MIN, (
f"Stored balance should be {POSTGRES_INT_MIN}, got {stored_balance}"
)
# Verify transaction was created with the underflow-protected balance
transactions = await CreditTransaction.prisma().find_many(
@@ -195,9 +193,9 @@ async def test_underflow_protection_large_refunds(server: SpinTestServer):
order={"createdAt": "desc"},
)
assert len(transactions) > 0, "Refund transaction should be created"
assert (
transactions[0].runningBalance == POSTGRES_INT_MIN
), f"Transaction should show clamped balance {POSTGRES_INT_MIN}, got {transactions[0].runningBalance}"
assert transactions[0].runningBalance == POSTGRES_INT_MIN, (
f"Transaction should show clamped balance {POSTGRES_INT_MIN}, got {transactions[0].runningBalance}"
)
finally:
await cleanup_test_user(user_id)
@@ -238,12 +236,12 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe
expected_balance_1 = (
initial_balance + refund_amount
) # Should be POSTGRES_INT_MIN + 200
assert (
balance_1 == expected_balance_1
), f"First refund should result in {expected_balance_1}, got {balance_1}"
assert (
balance_1 >= POSTGRES_INT_MIN
), f"First refund should not go below {POSTGRES_INT_MIN}, got {balance_1}"
assert balance_1 == expected_balance_1, (
f"First refund should result in {expected_balance_1}, got {balance_1}"
)
assert balance_1 >= POSTGRES_INT_MIN, (
f"First refund should not go below {POSTGRES_INT_MIN}, got {balance_1}"
)
# Second refund: (POSTGRES_INT_MIN + 200) + (-300) = POSTGRES_INT_MIN - 100 (would underflow)
balance_2, _ = await credit_system._add_transaction(
@@ -254,9 +252,9 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe
)
# Should be clamped to minimum due to underflow protection
assert (
balance_2 == POSTGRES_INT_MIN
), f"Second refund should be clamped to {POSTGRES_INT_MIN}, got {balance_2}"
assert balance_2 == POSTGRES_INT_MIN, (
f"Second refund should be clamped to {POSTGRES_INT_MIN}, got {balance_2}"
)
# Third refund: Should stay at minimum
balance_3, _ = await credit_system._add_transaction(
@@ -267,15 +265,15 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe
)
# Should still be at minimum
assert (
balance_3 == POSTGRES_INT_MIN
), f"Third refund should stay at {POSTGRES_INT_MIN}, got {balance_3}"
assert balance_3 == POSTGRES_INT_MIN, (
f"Third refund should stay at {POSTGRES_INT_MIN}, got {balance_3}"
)
# Final balance check
final_balance = await credit_system.get_credits(user_id)
assert (
final_balance == POSTGRES_INT_MIN
), f"Final balance should be {POSTGRES_INT_MIN}, got {final_balance}"
assert final_balance == POSTGRES_INT_MIN, (
f"Final balance should be {POSTGRES_INT_MIN}, got {final_balance}"
)
finally:
await cleanup_test_user(user_id)
@@ -327,35 +325,35 @@ async def test_concurrent_large_refunds_no_underflow(server: SpinTestServer):
for i, result in enumerate(results):
if isinstance(result, tuple):
balance, _ = result
assert (
balance >= POSTGRES_INT_MIN
), f"Result {i} balance {balance} underflowed below {POSTGRES_INT_MIN}"
assert balance >= POSTGRES_INT_MIN, (
f"Result {i} balance {balance} underflowed below {POSTGRES_INT_MIN}"
)
valid_results.append(balance)
elif isinstance(result, str) and "FAILED" in result:
# Some operations might fail due to validation, that's okay
pass
else:
# Unexpected exception
assert not isinstance(
result, Exception
), f"Unexpected exception in result {i}: {result}"
assert not isinstance(result, Exception), (
f"Unexpected exception in result {i}: {result}"
)
# At least one operation should succeed
assert (
len(valid_results) > 0
), f"At least one refund should succeed, got results: {results}"
assert len(valid_results) > 0, (
f"At least one refund should succeed, got results: {results}"
)
# All successful results should be >= POSTGRES_INT_MIN
for balance in valid_results:
assert (
balance >= POSTGRES_INT_MIN
), f"Balance {balance} should not be below {POSTGRES_INT_MIN}"
assert balance >= POSTGRES_INT_MIN, (
f"Balance {balance} should not be below {POSTGRES_INT_MIN}"
)
# Final balance should be valid and at or above POSTGRES_INT_MIN
final_balance = await credit_system.get_credits(user_id)
assert (
final_balance >= POSTGRES_INT_MIN
), f"Final balance {final_balance} should not underflow below {POSTGRES_INT_MIN}"
assert final_balance >= POSTGRES_INT_MIN, (
f"Final balance {final_balance} should not underflow below {POSTGRES_INT_MIN}"
)
finally:
await cleanup_test_user(user_id)


@@ -60,9 +60,9 @@ async def test_user_balance_migration_complete(server: SpinTestServer):
# User.balance should not exist or should be None/0 if it exists
user_balance_attr = getattr(user, "balance", None)
if user_balance_attr is not None:
assert (
user_balance_attr == 0 or user_balance_attr is None
), f"User.balance should be 0 or None, got {user_balance_attr}"
assert user_balance_attr == 0 or user_balance_attr is None, (
f"User.balance should be 0 or None, got {user_balance_attr}"
)
# 2. Perform various credit operations using internal method (bypasses Stripe)
await credit_system._add_transaction(
@@ -87,9 +87,9 @@ async def test_user_balance_migration_complete(server: SpinTestServer):
# 3. Verify UserBalance table has correct values
user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id})
assert user_balance is not None
assert (
user_balance.balance == 700
), f"UserBalance should be 700, got {user_balance.balance}"
assert user_balance.balance == 700, (
f"UserBalance should be 700, got {user_balance.balance}"
)
# 4. CRITICAL: Verify User.balance is NEVER updated during operations
user_after = await User.prisma().find_unique(where={"id": user_id})
@@ -97,15 +97,15 @@ async def test_user_balance_migration_complete(server: SpinTestServer):
user_balance_after = getattr(user_after, "balance", None)
if user_balance_after is not None:
# If User.balance exists, it should still be 0 (never updated)
assert (
user_balance_after == 0 or user_balance_after is None
), f"User.balance should remain 0/None after operations, got {user_balance_after}. This indicates User.balance is still being used!"
assert user_balance_after == 0 or user_balance_after is None, (
f"User.balance should remain 0/None after operations, got {user_balance_after}. This indicates User.balance is still being used!"
)
# 5. Verify get_credits always returns UserBalance value, not User.balance
final_balance = await credit_system.get_credits(user_id)
assert (
final_balance == user_balance.balance
), f"get_credits should return UserBalance value {user_balance.balance}, got {final_balance}"
assert final_balance == user_balance.balance, (
f"get_credits should return UserBalance value {user_balance.balance}, got {final_balance}"
)
finally:
await cleanup_test_user(user_id)
@@ -126,9 +126,9 @@ async def test_detect_stale_user_balance_queries(server: SpinTestServer):
# Verify that get_credits returns UserBalance value (5000), not any stale User.balance value
balance = await credit_system.get_credits(user_id)
assert (
balance == 5000
), f"Expected get_credits to return 5000 from UserBalance, got {balance}"
assert balance == 5000, (
f"Expected get_credits to return 5000 from UserBalance, got {balance}"
)
# Verify all operations use UserBalance using internal method (bypasses Stripe)
await credit_system._add_transaction(
@@ -143,9 +143,9 @@ async def test_detect_stale_user_balance_queries(server: SpinTestServer):
# Verify UserBalance table has the correct value
user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id})
assert user_balance is not None
assert (
user_balance.balance == 6000
), f"UserBalance should be 6000, got {user_balance.balance}"
assert user_balance.balance == 6000, (
f"UserBalance should be 6000, got {user_balance.balance}"
)
finally:
await cleanup_test_user(user_id)
@@ -196,9 +196,9 @@ async def test_concurrent_operations_use_userbalance_only(server: SpinTestServer
# Verify UserBalance has correct value
user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id})
assert user_balance is not None
assert (
user_balance.balance == 400
), f"UserBalance should be 400, got {user_balance.balance}"
assert user_balance.balance == 400, (
f"UserBalance should be 400, got {user_balance.balance}"
)
# Critical: If User.balance exists and was used, it might have wrong value
try:


@@ -835,9 +835,9 @@ class GraphModel(Graph, GraphMeta):
# Check for missing dependencies when dependent field is present
missing_deps = [dep for dep in dependencies if not has_value(node, dep)]
if missing_deps and (field_has_value or field_is_required):
node_errors[node.id][
field_name
] = f"Requires {', '.join(missing_deps)} to be set"
node_errors[node.id][field_name] = (
f"Requires {', '.join(missing_deps)} to be set"
)
return node_errors


@@ -224,7 +224,9 @@ class TestBuildExecutionSummary:
# Check that errors are now in node's recent_errors field
# Find the output node (with truncated UUID)
output_node = next(
n for n in summary["nodes"] if n["node_id"] == "678e9012" # Truncated
n
for n in summary["nodes"]
if n["node_id"] == "678e9012" # Truncated
)
assert output_node["error_count"] == 1
assert output_node["execution_count"] == 1
@@ -352,7 +354,9 @@ class TestBuildExecutionSummary:
# String error format - find node with truncated ID
string_error_node = next(
n for n in summary["nodes"] if n["node_id"] == "333e4444" # Truncated
n
for n in summary["nodes"]
if n["node_id"] == "333e4444" # Truncated
)
assert len(string_error_node["recent_errors"]) == 1
assert (
@@ -362,7 +366,9 @@ class TestBuildExecutionSummary:
# No error output format - find node with truncated ID
no_error_node = next(
n for n in summary["nodes"] if n["node_id"] == "777e8888" # Truncated
n
for n in summary["nodes"]
if n["node_id"] == "777e8888" # Truncated
)
assert len(no_error_node["recent_errors"]) == 1
assert no_error_node["recent_errors"][0]["error"] == "Unknown error"


@@ -92,12 +92,12 @@ async def assert_sample_graph_executions(
logger.info(f"Checking first StoreValueBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
assert (
exec.output_data in output_list
), f"Output data: {exec.output_data} and {output_list}"
assert (
exec.input_data in input_list
), f"Input data: {exec.input_data} and {input_list}"
assert exec.output_data in output_list, (
f"Output data: {exec.output_data} and {output_list}"
)
assert exec.input_data in input_list, (
f"Input data: {exec.input_data} and {input_list}"
)
assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id]
# Executing StoreValueBlock
@@ -105,12 +105,12 @@ async def assert_sample_graph_executions(
logger.info(f"Checking second StoreValueBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
assert (
exec.output_data in output_list
), f"Output data: {exec.output_data} and {output_list}"
assert (
exec.input_data in input_list
), f"Input data: {exec.input_data} and {input_list}"
assert exec.output_data in output_list, (
f"Output data: {exec.output_data} and {output_list}"
)
assert exec.input_data in input_list, (
f"Input data: {exec.input_data} and {input_list}"
)
assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id]
# Executing FillTextTemplateBlock


@@ -292,9 +292,9 @@ async def _validate_node_input_credentials(
if node.credentials_optional:
continue # Don't add error, will be marked for skip after loop
else:
credential_errors[node.id][
field_name
] = "These credentials are required"
credential_errors[node.id][field_name] = (
"These credentials are required"
)
continue
credentials_meta = credentials_meta_type.model_validate(field_value)
@@ -313,15 +313,15 @@ async def _validate_node_input_credentials(
except Exception as e:
# Handle any errors fetching credentials
# If credentials were explicitly configured but unavailable, it's an error
credential_errors[node.id][
field_name
] = f"Credentials not available: {e}"
credential_errors[node.id][field_name] = (
f"Credentials not available: {e}"
)
continue
if not credentials:
credential_errors[node.id][
field_name
] = f"Unknown credentials #{credentials_meta.id}"
credential_errors[node.id][field_name] = (
f"Unknown credentials #{credentials_meta.id}"
)
continue
if (
@@ -334,9 +334,9 @@ async def _validate_node_input_credentials(
f"{credentials_meta.type}<>{credentials.type};"
f"{credentials_meta.provider}<>{credentials.provider}"
)
credential_errors[node.id][
field_name
] = "Invalid credentials: type/provider mismatch"
credential_errors[node.id][field_name] = (
"Invalid credentials: type/provider mismatch"
)
continue
# If node has optional credentials and any are missing, mark for skipping
@@ -632,8 +632,7 @@ def create_execution_queue_config() -> RabbitMQConfig:
# Solution: Disable consumer timeout entirely - let graphs run indefinitely
# Safety: Heartbeat mechanism now handles dead consumer detection instead
# Use case: Graph executions that take hours to complete (AI model training, etc.)
"x-consumer-timeout": GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS
* 1000,
"x-consumer-timeout": GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS * 1000,
},
)
cancel_queue = Queue(


@@ -168,7 +168,9 @@ def async_error_logged() -> Callable[
]: ...
def async_error_logged(*, swallow: bool = True) -> (
def async_error_logged(
*, swallow: bool = True
) -> (
Callable[
[Callable[P, Coroutine[Any, Any, T]]],
Callable[P, Coroutine[Any, Any, T | None]],


@@ -203,12 +203,12 @@ class TestDynamicClientConnectionHealing:
sync_after = self.client.sync_client
async_after = self.client.async_client
assert (
sync_before is sync_after
), "Sync client should not be reset before threshold"
assert (
async_before is async_after
), "Async client should not be reset before threshold"
assert sync_before is sync_after, (
"Sync client should not be reset before threshold"
)
assert async_before is async_after, (
"Async client should not be reset before threshold"
)
assert self.client._connection_failure_count == 2
def test_no_reset_within_time_window(self):
@@ -228,12 +228,12 @@ class TestDynamicClientConnectionHealing:
sync_after = self.client.sync_client
async_after = self.client.async_client
assert (
sync_before is sync_after
), "Sync client should not be reset within time window"
assert (
async_before is async_after
), "Async client should not be reset within time window"
assert sync_before is sync_after, (
"Sync client should not be reset within time window"
)
assert async_before is async_after, (
"Async client should not be reset within time window"
)
assert self.client._connection_failure_count == 3
def test_reset_after_threshold_and_time(self):
@@ -253,15 +253,15 @@ class TestDynamicClientConnectionHealing:
sync_after = self.client.sync_client
async_after = self.client.async_client
assert (
sync_before is not sync_after
), "Sync client should be reset after threshold"
assert (
async_before is not async_after
), "Async client should be reset after threshold"
assert (
self.client._connection_failure_count == 0
), "Failure count should be reset"
assert sync_before is not sync_after, (
"Sync client should be reset after threshold"
)
assert async_before is not async_after, (
"Async client should be reset after threshold"
)
assert self.client._connection_failure_count == 0, (
"Failure count should be reset"
)
def test_reset_counters_after_healing(self):
"""Test that counters are properly reset after healing"""
@@ -313,9 +313,9 @@ class TestConnectionHealingIntegration:
time_condition = current_time - last_reset_time > 30
should_trigger_reset = failure_count >= 3 and time_condition
assert (
should_trigger_reset == should_reset
), f"Time window logic failed for {current_time - last_reset_time} seconds ago"
assert should_trigger_reset == should_reset, (
f"Time window logic failed for {current_time - last_reset_time} seconds ago"
)
def test_cached_property_behavior():


@@ -708,9 +708,9 @@ class TestSafeJson:
]
for char in control_chars:
assert (
char not in json_string
), f"Control character {repr(char)} found in result"
assert char not in json_string, (
f"Control character {repr(char)} found in result"
)
# Verify specific sanitized content is present (control chars removed but text preserved)
result_data = cast(dict[str, Any], result.data)


@@ -173,15 +173,15 @@ def test_queue_ordering_behavior():
messages = tester.consume_messages(max_messages=3)
assert len(messages) == 3, f"Expected 3 messages, got {len(messages)}"
assert (
messages[0]["graph_exec_id"] == "exec-A"
), f"First message should be A, got {messages[0]['graph_exec_id']}"
assert (
messages[1]["graph_exec_id"] == "exec-B"
), f"Second message should be B, got {messages[1]['graph_exec_id']}"
assert (
messages[2]["graph_exec_id"] == "exec-C"
), f"Third message should be C, got {messages[2]['graph_exec_id']}"
assert messages[0]["graph_exec_id"] == "exec-A", (
f"First message should be A, got {messages[0]['graph_exec_id']}"
)
assert messages[1]["graph_exec_id"] == "exec-B", (
f"Second message should be B, got {messages[1]['graph_exec_id']}"
)
assert messages[2]["graph_exec_id"] == "exec-C", (
f"Third message should be C, got {messages[2]['graph_exec_id']}"
)
print("✅ FIFO order confirmed: A -> B -> C")
@@ -250,9 +250,9 @@ def test_queue_ordering_behavior():
if msg["graph_exec_id"] == "exec-X"
)
assert (
y_index < republished_x_index
), f"Y should come before republished X, but got order: {[m['graph_exec_id'] for m in messages]}"
assert y_index < republished_x_index, (
f"Y should come before republished X, but got order: {[m['graph_exec_id'] for m in messages]}"
)
print("✅ Republishing confirmed: messages go to back of queue")
@@ -291,9 +291,9 @@ def test_traditional_requeue_behavior():
assert method_frame is not None, "Should have received message A"
consumed_msg = json.loads(body.decode())
assert (
consumed_msg["graph_exec_id"] == "exec-A"
), f"Should have consumed message A, got {consumed_msg['graph_exec_id']}"
assert consumed_msg["graph_exec_id"] == "exec-A", (
f"Should have consumed message A, got {consumed_msg['graph_exec_id']}"
)
# Traditional requeue: basic_nack with requeue=True (sends to FRONT)
channel.basic_nack(delivery_tag=method_frame.delivery_tag, requeue=True)
@@ -320,20 +320,20 @@ def test_traditional_requeue_behavior():
# CRITICAL ASSERTION: Traditional requeue should put A at FRONT
# Expected order: A (requeued to front), B
assert (
len(received_messages) == 2
), f"Expected 2 messages, got {len(received_messages)}"
assert len(received_messages) == 2, (
f"Expected 2 messages, got {len(received_messages)}"
)
first_msg = received_messages[0]["graph_exec_id"]
second_msg = received_messages[1]["graph_exec_id"]
# This is the critical test: requeued message A should come BEFORE B
assert (
first_msg == "exec-A"
), f"Traditional requeue should put A at FRONT, but first message was: {first_msg}"
assert (
second_msg == "exec-B"
), f"B should come after requeued A, but second message was: {second_msg}"
assert first_msg == "exec-A", (
f"Traditional requeue should put A at FRONT, but first message was: {first_msg}"
)
assert second_msg == "exec-B", (
f"B should come after requeued A, but second message was: {second_msg}"
)
print(
"✅ HYPOTHESIS CONFIRMED: Traditional requeue sends messages to FRONT of queue"