Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-11 23:35:25 -05:00)
Revert "style: run ruff format and isort"
This reverts commit 40b58807ab.
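Every hunk below reverts one of two mechanical rewrites that ruff format had applied: long assert messages move from ruff's parenthesized-message form back to the older wrapped-condition form, and long yield "error", ... statements that ruff had wrapped into parenthesized tuples collapse back onto single lines. As a quick orientation, here is the assert change as it appears in the first hunk; this is only an illustrative sketch lifted from the diff itself, and the two forms are equivalent at runtime, differing only in line wrapping:

    # Style removed by the revert (ruff format):
    assert expected_error in error_text, (
        f"Expected '{expected_error}' in error response: {error_text}"
    )

    # Style restored by the revert (pre-ruff):
    assert (
        expected_error in error_text
    ), f"Expected '{expected_error}' in error response: {error_text}"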
@@ -154,9 +154,9 @@ def test_log_raw_metric_validation_errors(
     assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"

     error_text = json.dumps(error_detail)
-    assert expected_error in error_text, (
-        f"Expected '{expected_error}' in error response: {error_text}"
-    )
+    assert (
+        expected_error in error_text
+    ), f"Expected '{expected_error}' in error response: {error_text}"


 def test_log_raw_metric_service_error(

@@ -310,9 +310,9 @@ def test_log_raw_analytics_validation_errors(
     assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"

     error_text = json.dumps(error_detail)
-    assert expected_error in error_text, (
-        f"Expected '{expected_error}' in error response: {error_text}"
-    )
+    assert (
+        expected_error in error_text
+    ), f"Expected '{expected_error}' in error response: {error_text}"


 def test_log_raw_analytics_service_error(

@@ -96,9 +96,9 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
     )

     assert s2 is not None, "Session not found after loading from DB"
-    assert len(s2.messages) == len(s.messages), (
-        f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}"
-    )
+    assert len(s2.messages) == len(
+        s.messages
+    ), f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}"

     # Verify all roles are present
     roles = [m.role for m in s2.messages]

@@ -109,11 +109,11 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
     # Verify message content
     for orig, loaded in zip(s.messages, s2.messages):
         assert orig.role == loaded.role, f"Role mismatch: {orig.role} != {loaded.role}"
-        assert orig.content == loaded.content, (
-            f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}"
-        )
+        assert (
+            orig.content == loaded.content
+        ), f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}"
         if orig.tool_calls:
-            assert loaded.tool_calls is not None, (
-                f"Tool calls missing for {orig.role} message"
-            )
+            assert (
+                loaded.tool_calls is not None
+            ), f"Tool calls missing for {orig.role} message"
             assert len(orig.tool_calls) == len(loaded.tool_calls)

@@ -197,7 +197,8 @@ def _validate_bash_command(
             allowed = ", ".join(sorted(ALLOWED_BASH_COMMANDS))
             logger.warning(f"Blocked Bash command: {cmd_name}")
             return _deny(
-                f"Command '{cmd_name}' is not allowed. Allowed commands: {allowed}"
+                f"Command '{cmd_name}' is not allowed. "
+                f"Allowed commands: {allowed}"
             )
         expect_command = False

@@ -120,9 +120,7 @@ def _cleanup_sdk_tool_results(cwd: str) -> None:
     # Security check 3: Validate project_dir is under ~/.claude/projects
     project_dir = os.path.normpath(project_dir)
     if not project_dir.startswith(claude_projects):
-        logger.warning(
-            f"[SDK] Rejecting cleanup for escaped project path: {project_dir}"
-        )
+        logger.warning(f"[SDK] Rejecting cleanup for escaped project path: {project_dir}")
         return

     results_dir = os.path.join(project_dir, "tool-results")

@@ -37,8 +37,7 @@ _current_tool_call_id: ContextVar[str | None] = ContextVar(
 # Keyed by tool_name → full output string. Consumed (popped) by the
 # response adapter when it builds StreamToolOutputAvailable.
 _pending_tool_outputs: ContextVar[dict[str, str]] = ContextVar(
-    "pending_tool_outputs",
-    default=None,  # type: ignore[arg-type]
+    "pending_tool_outputs", default=None  # type: ignore[arg-type]
 )


@@ -1184,13 +1184,13 @@ async def _stream_chat_chunks(
                     tool_calls[idx]["id"] = tc_chunk.id
                 if tc_chunk.function:
                     if tc_chunk.function.name:
-                        tool_calls[idx]["function"]["name"] = (
-                            tc_chunk.function.name
-                        )
+                        tool_calls[idx]["function"][
+                            "name"
+                        ] = tc_chunk.function.name
                     if tc_chunk.function.arguments:
-                        tool_calls[idx]["function"]["arguments"] += (
-                            tc_chunk.function.arguments
-                        )
+                        tool_calls[idx]["function"][
+                            "arguments"
+                        ] += tc_chunk.function.arguments

                 # Emit StreamToolInputStart only after we have the tool call ID
                 if (

@@ -569,7 +569,7 @@ async def _stream_listener(
             if isinstance(chunk, StreamFinish):
                 total_time = (time.perf_counter() - start_time) * 1000
                 logger.info(
-                    f"[TIMING] StreamFinish received in {total_time / 1000:.1f}s; delivered={messages_delivered}",
+                    f"[TIMING] StreamFinish received in {total_time/1000:.1f}s; delivered={messages_delivered}",
                     extra={
                         "json_fields": {
                             **log_meta,

@@ -620,7 +620,7 @@ async def _stream_listener(
         # Clean up listener task mapping on exit
         total_time = (time.perf_counter() - start_time) * 1000
         logger.info(
-            f"[TIMING] _stream_listener FINISHED in {total_time / 1000:.1f}s; task={task_id}, "
+            f"[TIMING] _stream_listener FINISHED in {total_time/1000:.1f}s; task={task_id}, "
             f"delivered={messages_delivered}, xread_count={xread_count}",
             extra={
                 "json_fields": {

@@ -151,10 +151,9 @@ class RunBlockTool(BaseTool):
         logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")

         creds_manager = IntegrationCredentialsManager()
-        (
-            matched_credentials,
-            missing_credentials,
-        ) = await self._resolve_block_credentials(user_id, block, input_data)
+        matched_credentials, missing_credentials = (
+            await self._resolve_block_credentials(user_id, block, input_data)
+        )

         if missing_credentials:
             # Return setup requirements response with missing credentials

@@ -152,7 +152,9 @@ async def test_add_agent_to_library(mocker):
     # Mock graph_db.get_graph function that's called to check for HITL blocks
     mock_graph_db = mocker.patch("backend.api.features.library.db.graph_db")
     mock_graph_model = mocker.Mock()
-    mock_graph_model.nodes = []  # Empty list so _has_human_in_the_loop_blocks returns False
+    mock_graph_model.nodes = (
+        []
+    )  # Empty list so _has_human_in_the_loop_blocks returns False
     mock_graph_db.get_graph = mocker.AsyncMock(return_value=mock_graph_model)

     # Mock the model conversion

@@ -57,7 +57,7 @@ async def postmark_webhook_handler(
     webhook: Annotated[
         PostmarkWebhook,
         Body(discriminator="RecordType"),
-    ],
+    ]
 ):
     logger.info(f"Received webhook from Postmark: {webhook}")
     match webhook:

@@ -164,7 +164,7 @@ class BlockHandler(ContentHandler):
         block_ids = list(all_blocks.keys())

         # Query for existing embeddings
-        placeholders = ",".join([f"${i + 1}" for i in range(len(block_ids))])
+        placeholders = ",".join([f"${i+1}" for i in range(len(block_ids))])
         existing_result = await query_raw_with_schema(
             f"""
             SELECT "contentId"

@@ -265,7 +265,7 @@ class BlockHandler(ContentHandler):
             return {"total": 0, "with_embeddings": 0, "without_embeddings": 0}

         block_ids = enabled_block_ids
-        placeholders = ",".join([f"${i + 1}" for i in range(len(block_ids))])
+        placeholders = ",".join([f"${i+1}" for i in range(len(block_ids))])

         embedded_result = await query_raw_with_schema(
             f"""

@@ -508,7 +508,7 @@ class DocumentationHandler(ContentHandler):
         ]

         # Check which ones have embeddings
-        placeholders = ",".join([f"${i + 1}" for i in range(len(section_content_ids))])
+        placeholders = ",".join([f"${i+1}" for i in range(len(section_content_ids))])
         existing_result = await query_raw_with_schema(
             f"""
             SELECT "contentId"

@@ -47,7 +47,7 @@ def mock_storage_client(mocker):

 async def test_upload_media_success(mock_settings, mock_storage_client):
     # Create test JPEG data with valid signature
-    test_data = b"\xff\xd8\xff" + b"test data"
+    test_data = b"\xFF\xD8\xFF" + b"test data"

     test_file = fastapi.UploadFile(
         filename="laptop.jpeg",

@@ -85,7 +85,7 @@ async def test_upload_media_missing_credentials(monkeypatch):

     test_file = fastapi.UploadFile(
         filename="laptop.jpeg",
-        file=io.BytesIO(b"\xff\xd8\xff" + b"test data"),  # Valid JPEG signature
+        file=io.BytesIO(b"\xFF\xD8\xFF" + b"test data"),  # Valid JPEG signature
         headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}),
     )

@@ -110,7 +110,7 @@ async def test_upload_media_video_type(mock_settings, mock_storage_client):


 async def test_upload_media_file_too_large(mock_settings, mock_storage_client):
-    large_data = b"\xff\xd8\xff" + b"x" * (
+    large_data = b"\xFF\xD8\xFF" + b"x" * (
         50 * 1024 * 1024 + 1
     )  # 50MB + 1 byte with valid JPEG signature
     test_file = fastapi.UploadFile(

@@ -499,12 +499,10 @@ async def test_upload_file_success(test_user_id: str):
     )

     # Mock dependencies
-    with (
-        patch("backend.api.features.v1.scan_content_safe") as mock_scan,
-        patch(
-            "backend.api.features.v1.get_cloud_storage_handler"
-        ) as mock_handler_getter,
-    ):
+    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
+        "backend.api.features.v1.get_cloud_storage_handler"
+    ) as mock_handler_getter:
+
         mock_scan.return_value = None
         mock_handler = AsyncMock()
         mock_handler.store_file.return_value = "gcs://test-bucket/uploads/123/test.txt"

@@ -553,12 +551,10 @@ async def test_upload_file_no_filename(test_user_id: str):
         ),
     )

-    with (
-        patch("backend.api.features.v1.scan_content_safe") as mock_scan,
-        patch(
-            "backend.api.features.v1.get_cloud_storage_handler"
-        ) as mock_handler_getter,
-    ):
+    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
+        "backend.api.features.v1.get_cloud_storage_handler"
+    ) as mock_handler_getter:
+
         mock_scan.return_value = None
         mock_handler = AsyncMock()
         mock_handler.store_file.return_value = (

@@ -636,12 +632,10 @@ async def test_upload_file_cloud_storage_failure(test_user_id: str):
         headers=starlette.datastructures.Headers({"content-type": "text/plain"}),
     )

-    with (
-        patch("backend.api.features.v1.scan_content_safe") as mock_scan,
-        patch(
-            "backend.api.features.v1.get_cloud_storage_handler"
-        ) as mock_handler_getter,
-    ):
+    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
+        "backend.api.features.v1.get_cloud_storage_handler"
+    ) as mock_handler_getter:
+
         mock_scan.return_value = None
         mock_handler = AsyncMock()
         mock_handler.store_file.side_effect = RuntimeError("Storage error!")

@@ -685,12 +679,10 @@ async def test_upload_file_gcs_not_configured_fallback(test_user_id: str):
         headers=starlette.datastructures.Headers({"content-type": "text/plain"}),
     )

-    with (
-        patch("backend.api.features.v1.scan_content_safe") as mock_scan,
-        patch(
-            "backend.api.features.v1.get_cloud_storage_handler"
-        ) as mock_handler_getter,
-    ):
+    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
+        "backend.api.features.v1.get_cloud_storage_handler"
+    ) as mock_handler_getter:
+
         mock_scan.return_value = None
         mock_handler = AsyncMock()
         mock_handler.config.gcs_bucket_name = ""  # Simulate no GCS bucket configured

@@ -102,12 +102,12 @@ def assert_mock_called_with_partial(mock_obj: Any, **expected_kwargs: Any) -> No
     actual_kwargs = mock_obj.call_args.kwargs if mock_obj.call_args else {}

     for key, expected_value in expected_kwargs.items():
-        assert key in actual_kwargs, (
-            f"Missing key '{key}' in mock call. Actual keys: {list(actual_kwargs.keys())}"
-        )
-        assert actual_kwargs[key] == expected_value, (
-            f"Mock called with {key}={actual_kwargs[key]}, expected {expected_value}"
-        )
+        assert (
+            key in actual_kwargs
+        ), f"Missing key '{key}' in mock call. Actual keys: {list(actual_kwargs.keys())}"
+        assert (
+            actual_kwargs[key] == expected_value
+        ), f"Mock called with {key}={actual_kwargs[key]}, expected {expected_value}"


 @contextmanager

@@ -457,8 +457,7 @@ async def test_api_key_with_unicode_characters_normalization_attack(mock_request
     """Test that Unicode normalization doesn't bypass validation."""
     # Create auth with composed Unicode character
     auth = APIKeyAuthenticator(
-        header_name="X-API-Key",
-        expected_token="café",  # é is composed
+        header_name="X-API-Key", expected_token="café"  # é is composed
     )

     # Try with decomposed version (c + a + f + e + ´)

@@ -523,8 +522,8 @@ async def test_api_keys_with_newline_variations(mock_request):
         "valid\r\ntoken",  # Windows newline
         "valid\rtoken",  # Mac newline
         "valid\x85token",  # NEL (Next Line)
-        "valid\x0btoken",  # Vertical Tab
-        "valid\x0ctoken",  # Form Feed
+        "valid\x0Btoken",  # Vertical Tab
+        "valid\x0Ctoken",  # Form Feed
     ]

     for api_key in newline_variations:

@@ -44,12 +44,9 @@ def test_websocket_server_uses_cors_helper(mocker) -> None:
         "backend.api.ws_api.build_cors_params", return_value=cors_params
     )

-    with (
-        override_config(
-            settings, "backend_cors_allow_origins", cors_params["allow_origins"]
-        ),
-        override_config(settings, "app_env", AppEnvironment.LOCAL),
-    ):
+    with override_config(
+        settings, "backend_cors_allow_origins", cors_params["allow_origins"]
+    ), override_config(settings, "app_env", AppEnvironment.LOCAL):
         WebsocketServer().run()

     build_cors.assert_called_once_with(

@@ -68,12 +65,9 @@ def test_websocket_server_uses_cors_helper(mocker) -> None:
 def test_websocket_server_blocks_localhost_in_production(mocker) -> None:
     mocker.patch("backend.api.ws_api.uvicorn.run")

-    with (
-        override_config(
-            settings, "backend_cors_allow_origins", ["http://localhost:3000"]
-        ),
-        override_config(settings, "app_env", AppEnvironment.PRODUCTION),
-    ):
+    with override_config(
+        settings, "backend_cors_allow_origins", ["http://localhost:3000"]
+    ), override_config(settings, "app_env", AppEnvironment.PRODUCTION):
         with pytest.raises(ValueError):
             WebsocketServer().run()

@@ -174,9 +174,7 @@ class AIImageGeneratorBlock(Block):
             ],
             test_mock={
                 # Return a data URI directly so store_media_file doesn't need to download
-                "_run_client": lambda *args, **kwargs: (
-                    "data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAQAcJYgCdAEO"
-                )
+                "_run_client": lambda *args, **kwargs: "data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAQAcJYgCdAEO"
             },
         )

@@ -142,9 +142,7 @@ class AIMusicGeneratorBlock(Block):
                 ),
             ],
             test_mock={
-                "run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: (
-                    "https://replicate.com/output/generated-audio-url.wav"
-                ),
+                "run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: "https://replicate.com/output/generated-audio-url.wav",
             },
             test_credentials=TEST_CREDENTIALS,
         )

@@ -556,9 +556,9 @@ async def create_table(
 ) -> dict:
     for field in table_fields:
         assert field.get("name"), "Field name is required"
-        assert field.get("type") in TABLE_FIELD_TYPES, (
-            f"Field type {field.get('type')} is not valid. Valid types are {TABLE_FIELD_TYPES}."
-        )
+        assert (
+            field.get("type") in TABLE_FIELD_TYPES
+        ), f"Field type {field.get('type')} is not valid. Valid types are {TABLE_FIELD_TYPES}."
         # Note fields have differnet options for different types we are not currently validating them

     response = await Requests().post(

@@ -582,9 +582,9 @@ async def update_table(
     date_dependency: dict | None = None,
 ) -> dict:

-    assert table_name or table_description or date_dependency, (
-        "At least one of table_name, table_description, or date_dependency must be provided"
-    )
+    assert (
+        table_name or table_description or date_dependency
+    ), "At least one of table_name, table_description, or date_dependency must be provided"

     params: dict[str, str | dict[str, str]] = {}
     if table_name:

@@ -613,9 +613,9 @@ async def create_field(
     options: dict[str, str] | None = None,
 ) -> dict[str, str | dict[str, str]]:

-    assert field_type in TABLE_FIELD_TYPES, (
-        f"Field type {field_type} is not valid. Valid types are {TABLE_FIELD_TYPES}."
-    )
+    assert (
+        field_type in TABLE_FIELD_TYPES
+    ), f"Field type {field_type} is not valid. Valid types are {TABLE_FIELD_TYPES}."
     params: dict[str, str | dict[str, str]] = {}
     params["type"] = field_type
     params["name"] = name

@@ -928,9 +928,9 @@ async def update_record(
     typecast: bool | None = None,
     fields: dict[str, Any] | None = None,
 ) -> dict[str, dict[str, dict[str, str]]]:
-    params: dict[
-        str, str | bool | dict[str, Any] | list[dict[str, dict[str, str]]]
-    ] = {}
+    params: dict[str, str | bool | dict[str, Any] | list[dict[str, dict[str, str]]]] = (
+        {}
+    )
     if return_fields_by_field_id:
         params["returnFieldsByFieldId"] = return_fields_by_field_id
     if typecast:

@@ -958,9 +958,9 @@ async def create_record(
     assert fields or records, "At least one of fields or records must be provided"
     assert not (fields and records), "Only one of fields or records can be provided"
     if records is not None:
-        assert len(records) <= 10, (
-            "Only up to 10 records can be provided when using records"
-        )
+        assert (
+            len(records) <= 10
+        ), "Only up to 10 records can be provided when using records"

     params: dict[str, str | bool | dict[str, Any] | list[dict[str, Any]]] = {}
     if fields:

@@ -43,9 +43,9 @@ async def test_create_update_table():
     workspace_id = "wsphuHmfllg7V3Brd"
     response = await create_base(credentials, workspace_id, "API Testing Base")
     assert response is not None, f"Checking create base response: {response}"
-    assert response.get("id") is not None, (
-        f"Checking create base response id: {response}"
-    )
+    assert (
+        response.get("id") is not None
+    ), f"Checking create base response id: {response}"
     base_id = response.get("id")
     assert base_id is not None, f"Checking create base response id: {base_id}"

@@ -236,9 +236,9 @@ async def test_record_management():
     updated_records = response.get("records")
     assert updated_records is not None
     assert len(updated_records) == 2, f"Updated records: {updated_records}"
-    assert isinstance(updated_records, list), (
-        f"Type of updated records: {type(updated_records)}"
-    )
+    assert isinstance(
+        updated_records, list
+    ), f"Type of updated records: {type(updated_records)}"
     first_updated = updated_records[0]  # type: ignore
     second_updated = updated_records[1]  # type: ignore
     first_updated_fields = first_updated.get("fields")

@@ -257,9 +257,9 @@ async def test_record_management():
     deleted_records = response.get("records")
     assert deleted_records is not None
     assert len(deleted_records) == 2, f"Deleted records: {deleted_records}"
-    assert isinstance(deleted_records, list), (
-        f"Type of deleted records: {type(deleted_records)}"
-    )
+    assert isinstance(
+        deleted_records, list
+    ), f"Type of deleted records: {type(deleted_records)}"
     first_deleted = deleted_records[0]  # type: ignore
     second_deleted = deleted_records[1]  # type: ignore
     assert first_deleted.get("deleted")

@@ -293,12 +293,12 @@ async def test_webhook_management():
     )
     response = await create_webhook(credentials, base_id, webhook_specification)
     assert response is not None, f"Checking create webhook response: {response}"
-    assert response.get("id") is not None, (
-        f"Checking create webhook response id: {response}"
-    )
-    assert response.get("macSecretBase64") is not None, (
-        f"Checking create webhook response macSecretBase64: {response}"
-    )
+    assert (
+        response.get("id") is not None
+    ), f"Checking create webhook response id: {response}"
+    assert (
+        response.get("macSecretBase64") is not None
+    ), f"Checking create webhook response macSecretBase64: {response}"

     webhook_id = response.get("id")
     assert webhook_id is not None, f"Webhook ID: {webhook_id}"

@@ -308,14 +308,14 @@ async def test_webhook_management():
         credentials, base_id, table_id, fields={"test_field": "test_value"}
     )
     assert response is not None, f"Checking create record response: {response}"
-    assert response.get("id") is not None, (
-        f"Checking create record response id: {response}"
-    )
+    assert (
+        response.get("id") is not None
+    ), f"Checking create record response id: {response}"
     fields = response.get("fields")
     assert fields is not None, f"Checking create record response fields: {response}"
-    assert fields.get("test_field") == "test_value", (
-        f"Checking create record response fields test_field: {response}"
-    )
+    assert (
+        fields.get("test_field") == "test_value"
+    ), f"Checking create record response fields test_field: {response}"

     response = await list_webhook_payloads(credentials, base_id, webhook_id)
     assert response is not None, f"Checking list webhook payloads response: {response}"

@@ -69,18 +69,12 @@ class PostToBlueskyBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate character limit for Bluesky
         if len(input_data.post) > 300:
-            yield (
-                "error",
-                f"Post text exceeds Bluesky's 300 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"Post text exceeds Bluesky's 300 character limit ({len(input_data.post)} characters)"
             return

         # Validate media constraints for Bluesky

@@ -131,10 +131,7 @@ class PostToFacebookBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Convert datetime to ISO format if provided

@@ -120,18 +120,12 @@ class PostToGMBBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate GMB constraints
         if len(input_data.media_urls) > 1:
-            yield (
-                "error",
-                "Google My Business supports only one image or video per post",
-            )
+            yield "error", "Google My Business supports only one image or video per post"
             return

         # Validate offer coupon code length

@@ -123,25 +123,16 @@ class PostToInstagramBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate Instagram constraints
         if len(input_data.post) > 2200:
-            yield (
-                "error",
-                f"Instagram post text exceeds 2,200 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"Instagram post text exceeds 2,200 character limit ({len(input_data.post)} characters)"
             return

         if len(input_data.media_urls) > 10:
-            yield (
-                "error",
-                "Instagram supports a maximum of 10 images/videos in a carousel",
-            )
+            yield "error", "Instagram supports a maximum of 10 images/videos in a carousel"
             return

         if len(input_data.collaborators) > 3:

@@ -156,10 +147,7 @@ class PostToInstagramBlock(Block):
         ]

         if any(reel_options) and not all(reel_options):
-            yield (
-                "error",
-                "When posting a reel, all reel options must be set: share_reels_feed, audio_name, and either thumbnail or thumbnail_offset",
-            )
+            yield "error", "When posting a reel, all reel options must be set: share_reels_feed, audio_name, and either thumbnail or thumbnail_offset"
             return

         # Count hashtags and mentions

@@ -167,17 +155,11 @@ class PostToInstagramBlock(Block):
         mention_count = input_data.post.count("@")

         if hashtag_count > 30:
-            yield (
-                "error",
-                f"Instagram allows maximum 30 hashtags ({hashtag_count} found)",
-            )
+            yield "error", f"Instagram allows maximum 30 hashtags ({hashtag_count} found)"
             return

         if mention_count > 3:
-            yield (
-                "error",
-                f"Instagram allows maximum 3 @mentions ({mention_count} found)",
-            )
+            yield "error", f"Instagram allows maximum 3 @mentions ({mention_count} found)"
             return

         # Convert datetime to ISO format if provided

@@ -209,10 +191,7 @@ class PostToInstagramBlock(Block):
             # Validate alt text length
             for i, alt in enumerate(input_data.alt_text):
                 if len(alt) > 1000:
-                    yield (
-                        "error",
-                        f"Alt text {i + 1} exceeds 1,000 character limit ({len(alt)} characters)",
-                    )
+                    yield "error", f"Alt text {i+1} exceeds 1,000 character limit ({len(alt)} characters)"
                     return
             instagram_options["altText"] = input_data.alt_text

@@ -227,19 +206,13 @@ class PostToInstagramBlock(Block):
                 try:
                     tag_obj = InstagramUserTag(**tag)
                 except Exception as e:
-                    yield (
-                        "error",
-                        f"Invalid user tag: {e}, tages need to be a dictionary with a 3 items: username (str), x (float) and y (float)",
-                    )
+                    yield "error", f"Invalid user tag: {e}, tages need to be a dictionary with a 3 items: username (str), x (float) and y (float)"
                     return
                 tag_dict: dict[str, float | str] = {"username": tag_obj.username}
                 if tag_obj.x is not None and tag_obj.y is not None:
                     # Validate coordinates
                     if not (0.0 <= tag_obj.x <= 1.0) or not (0.0 <= tag_obj.y <= 1.0):
-                        yield (
-                            "error",
-                            f"User tag coordinates must be between 0.0 and 1.0 (user: {tag_obj.username})",
-                        )
+                        yield "error", f"User tag coordinates must be between 0.0 and 1.0 (user: {tag_obj.username})"
                         return
                     tag_dict["x"] = tag_obj.x
                     tag_dict["y"] = tag_obj.y

@@ -123,18 +123,12 @@ class PostToLinkedInBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate LinkedIn constraints
         if len(input_data.post) > 3000:
-            yield (
-                "error",
-                f"LinkedIn post text exceeds 3,000 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"LinkedIn post text exceeds 3,000 character limit ({len(input_data.post)} characters)"
             return

         if len(input_data.media_urls) > 9:

@@ -142,19 +136,13 @@ class PostToLinkedInBlock(Block):
             return

         if input_data.document_title and len(input_data.document_title) > 400:
-            yield (
-                "error",
-                f"LinkedIn document title exceeds 400 character limit ({len(input_data.document_title)} characters)",
-            )
+            yield "error", f"LinkedIn document title exceeds 400 character limit ({len(input_data.document_title)} characters)"
             return

         # Validate visibility option
         valid_visibility = ["public", "connections", "loggedin"]
         if input_data.visibility not in valid_visibility:
-            yield (
-                "error",
-                f"LinkedIn visibility must be one of: {', '.join(valid_visibility)}",
-            )
+            yield "error", f"LinkedIn visibility must be one of: {', '.join(valid_visibility)}"
             return

         # Check for document extensions

@@ -103,32 +103,20 @@ class PostToPinterestBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate Pinterest constraints
         if len(input_data.post) > 500:
-            yield (
-                "error",
-                f"Pinterest pin description exceeds 500 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"Pinterest pin description exceeds 500 character limit ({len(input_data.post)} characters)"
             return

         if len(input_data.pin_title) > 100:
-            yield (
-                "error",
-                f"Pinterest pin title exceeds 100 character limit ({len(input_data.pin_title)} characters)",
-            )
+            yield "error", f"Pinterest pin title exceeds 100 character limit ({len(input_data.pin_title)} characters)"
             return

         if len(input_data.link) > 2048:
-            yield (
-                "error",
-                f"Pinterest link URL exceeds 2048 character limit ({len(input_data.link)} characters)",
-            )
+            yield "error", f"Pinterest link URL exceeds 2048 character limit ({len(input_data.link)} characters)"
             return

         if len(input_data.media_urls) == 0:

@@ -153,10 +141,7 @@ class PostToPinterestBlock(Block):
         # Validate alt text length
         for i, alt in enumerate(input_data.alt_text):
             if len(alt) > 500:
-                yield (
-                    "error",
-                    f"Pinterest alt text {i + 1} exceeds 500 character limit ({len(alt)} characters)",
-                )
+                yield "error", f"Pinterest alt text {i+1} exceeds 500 character limit ({len(alt)} characters)"
                 return

         # Convert datetime to ISO format if provided

@@ -73,10 +73,7 @@ class PostToSnapchatBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate Snapchat constraints

@@ -91,10 +88,7 @@ class PostToSnapchatBlock(Block):
         # Validate story type
         valid_story_types = ["story", "saved_story", "spotlight"]
         if input_data.story_type not in valid_story_types:
-            yield (
-                "error",
-                f"Snapchat story type must be one of: {', '.join(valid_story_types)}",
-            )
+            yield "error", f"Snapchat story type must be one of: {', '.join(valid_story_types)}"
             return

         # Convert datetime to ISO format if provided

@@ -68,10 +68,7 @@ class PostToTelegramBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate Telegram constraints

@@ -61,34 +61,22 @@ class PostToThreadsBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate Threads constraints
         if len(input_data.post) > 500:
-            yield (
-                "error",
-                f"Threads post text exceeds 500 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"Threads post text exceeds 500 character limit ({len(input_data.post)} characters)"
             return

         if len(input_data.media_urls) > 20:
-            yield (
-                "error",
-                "Threads supports a maximum of 20 images/videos in a carousel",
-            )
+            yield "error", "Threads supports a maximum of 20 images/videos in a carousel"
             return

         # Count hashtags (only 1 allowed)
         hashtag_count = input_data.post.count("#")
         if hashtag_count > 1:
-            yield (
-                "error",
-                f"Threads allows only 1 hashtag per post ({hashtag_count} found)",
-            )
+            yield "error", f"Threads allows only 1 hashtag per post ({hashtag_count} found)"
             return

         # Convert datetime to ISO format if provided

@@ -123,25 +123,16 @@ class PostToTikTokBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate TikTok constraints
         if len(input_data.post) > 2200:
-            yield (
-                "error",
-                f"TikTok post text exceeds 2,200 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"TikTok post text exceeds 2,200 character limit ({len(input_data.post)} characters)"
             return

         if not input_data.media_urls:
-            yield (
-                "error",
-                "TikTok requires at least one media URL (either 1 video or up to 35 images)",
-            )
+            yield "error", "TikTok requires at least one media URL (either 1 video or up to 35 images)"
             return

         # Check for video vs image constraints

@@ -159,10 +150,7 @@ class PostToTikTokBlock(Block):
         )

         if has_video and has_images:
-            yield (
-                "error",
-                "TikTok does not support mixing video and images in the same post",
-            )
+            yield "error", "TikTok does not support mixing video and images in the same post"
             return

         if has_video and len(input_data.media_urls) > 1:

@@ -175,19 +163,13 @@ class PostToTikTokBlock(Block):

         # Validate image cover index
         if has_images and input_data.image_cover_index >= len(input_data.media_urls):
-            yield (
-                "error",
-                f"Image cover index {input_data.image_cover_index} is out of range (max: {len(input_data.media_urls) - 1})",
-            )
+            yield "error", f"Image cover index {input_data.image_cover_index} is out of range (max: {len(input_data.media_urls) - 1})"
             return

         # Check for PNG files (not supported)
         has_png = any(url.lower().endswith(".png") for url in input_data.media_urls)
         if has_png:
-            yield (
-                "error",
-                "TikTok does not support PNG files. Please use JPG, JPEG, or WEBP for images.",
-            )
+            yield "error", "TikTok does not support PNG files. Please use JPG, JPEG, or WEBP for images."
             return

         # Convert datetime to ISO format if provided

@@ -126,25 +126,16 @@ class PostToXBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate X constraints
         if not input_data.long_post and len(input_data.post) > 280:
-            yield (
-                "error",
-                f"X post text exceeds 280 character limit ({len(input_data.post)} characters). Enable 'long_post' for Premium accounts.",
-            )
+            yield "error", f"X post text exceeds 280 character limit ({len(input_data.post)} characters). Enable 'long_post' for Premium accounts."
             return

         if input_data.long_post and len(input_data.post) > 25000:
-            yield (
-                "error",
-                f"X long post text exceeds 25,000 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"X long post text exceeds 25,000 character limit ({len(input_data.post)} characters)"
             return

         if len(input_data.media_urls) > 4:

@@ -158,20 +149,14 @@ class PostToXBlock(Block):
             return

         if input_data.poll_duration < 1 or input_data.poll_duration > 10080:
-            yield (
-                "error",
-                "X poll duration must be between 1 and 10,080 minutes (7 days)",
-            )
+            yield "error", "X poll duration must be between 1 and 10,080 minutes (7 days)"
             return

         # Validate alt text
         if input_data.alt_text:
             for i, alt in enumerate(input_data.alt_text):
                 if len(alt) > 1000:
-                    yield (
-                        "error",
-                        f"X alt text {i + 1} exceeds 1,000 character limit ({len(alt)} characters)",
-                    )
+                    yield "error", f"X alt text {i+1} exceeds 1,000 character limit ({len(alt)} characters)"
                     return

         # Validate subtitle settings

@@ -183,10 +168,7 @@ class PostToXBlock(Block):
             return

         if len(input_data.subtitle_name) > 150:
-            yield (
-                "error",
-                f"Subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)",
-            )
+            yield "error", f"Subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)"
             return

         # Convert datetime to ISO format if provided

@@ -149,10 +149,7 @@ class PostToYouTubeBlock(Block):

         client = create_ayrshare_client()
         if not client:
-            yield (
-                "error",
-                "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.",
-            )
+            yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY."
             return

         # Validate YouTube constraints

@@ -161,17 +158,11 @@ class PostToYouTubeBlock(Block):
             return

         if len(input_data.title) > 100:
-            yield (
-                "error",
-                f"YouTube title exceeds 100 character limit ({len(input_data.title)} characters)",
-            )
+            yield "error", f"YouTube title exceeds 100 character limit ({len(input_data.title)} characters)"
             return

         if len(input_data.post) > 5000:
-            yield (
-                "error",
-                f"YouTube description exceeds 5,000 character limit ({len(input_data.post)} characters)",
-            )
+            yield "error", f"YouTube description exceeds 5,000 character limit ({len(input_data.post)} characters)"
             return

         # Check for forbidden characters

@@ -195,10 +186,7 @@ class PostToYouTubeBlock(Block):
         # Validate visibility option
         valid_visibility = ["private", "public", "unlisted"]
         if input_data.visibility not in valid_visibility:
-            yield (
-                "error",
-                f"YouTube visibility must be one of: {', '.join(valid_visibility)}",
-            )
+            yield "error", f"YouTube visibility must be one of: {', '.join(valid_visibility)}"
             return

         # Validate thumbnail URL format

@@ -214,18 +202,12 @@ class PostToYouTubeBlock(Block):
         if input_data.tags:
             total_tag_length = sum(len(tag) for tag in input_data.tags)
             if total_tag_length > 500:
-                yield (
-                    "error",
-                    f"YouTube tags total length exceeds 500 characters ({total_tag_length} characters)",
-                )
+                yield "error", f"YouTube tags total length exceeds 500 characters ({total_tag_length} characters)"
                 return

             for tag in input_data.tags:
                 if len(tag) < 2:
-                    yield (
-                        "error",
-                        f"YouTube tag '{tag}' is too short (minimum 2 characters)",
-                    )
+                    yield "error", f"YouTube tag '{tag}' is too short (minimum 2 characters)"
                     return

         # Validate subtitle URL

@@ -243,18 +225,12 @@ class PostToYouTubeBlock(Block):
             return

         if input_data.subtitle_name and len(input_data.subtitle_name) > 150:
-            yield (
-                "error",
-                f"YouTube subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)",
-            )
+            yield "error", f"YouTube subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)"
             return

         # Validate publish_at format if provided
         if input_data.publish_at and input_data.schedule_date:
-            yield (
-                "error",
-                "Cannot use both 'publish_at' and 'schedule_date'. Use 'publish_at' for YouTube-controlled publishing.",
-            )
+            yield "error", "Cannot use both 'publish_at' and 'schedule_date'. Use 'publish_at' for YouTube-controlled publishing."
             return

         # Convert datetime to ISO format if provided (only if not using publish_at)

@@ -59,13 +59,10 @@ class FileStoreBlock(Block):
         # for_block_output: smart format - workspace:// in CoPilot, data URI in graphs
         return_format = "for_external_api" if input_data.base_64 else "for_block_output"

-        yield (
-            "file_out",
-            await store_media_file(
-                file=input_data.file_in,
-                execution_context=execution_context,
-                return_format=return_format,
-            ),
-        )
+        yield "file_out", await store_media_file(
+            file=input_data.file_in,
+            execution_context=execution_context,
+            return_format=return_format,
+        )


@@ -728,12 +728,9 @@ class ConcatenateListsBlock(Block):
                 # Type validation: each item must be a list
                 # Strings are iterable and would cause extend() to iterate character-by-character
                 # Non-iterable types would raise TypeError
-                yield (
-                    "error",
-                    (
-                        f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. "
-                        f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
-                    ),
-                )
+                yield "error", (
+                    f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. "
+                    f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
+                )
                 return
             concatenated.extend(lst)

@@ -110,10 +110,8 @@ class DataForSeoKeywordSuggestionsBlock(Block):
             test_output=[
                 (
                     "suggestion",
-                    lambda x: (
-                        hasattr(x, "keyword")
-                        and x.keyword == "digital marketing strategy"
-                    ),
+                    lambda x: hasattr(x, "keyword")
+                    and x.keyword == "digital marketing strategy",
                 ),
                 ("suggestions", lambda x: isinstance(x, list) and len(x) == 1),
                 ("total_count", 1),

@@ -137,71 +137,47 @@ class SendEmailBlock(Block):
             )
             yield "status", status
         except socket.gaierror:
-            yield (
-                "error",
-                (
-                    f"Cannot connect to SMTP server '{input_data.config.smtp_server}'. "
-                    "Please verify the server address is correct."
-                ),
-            )
+            yield "error", (
+                f"Cannot connect to SMTP server '{input_data.config.smtp_server}'. "
+                "Please verify the server address is correct."
+            )
         except socket.timeout:
-            yield (
-                "error",
-                (
-                    f"Connection timeout to '{input_data.config.smtp_server}' "
-                    f"on port {input_data.config.smtp_port}. "
-                    "The server may be down or unreachable."
-                ),
-            )
+            yield "error", (
+                f"Connection timeout to '{input_data.config.smtp_server}' "
+                f"on port {input_data.config.smtp_port}. "
+                "The server may be down or unreachable."
+            )
         except ConnectionRefusedError:
-            yield (
-                "error",
-                (
-                    f"Connection refused to '{input_data.config.smtp_server}' "
-                    f"on port {input_data.config.smtp_port}. "
-                    "Common SMTP ports are: 587 (TLS), 465 (SSL), 25 (plain). "
-                    "Please verify the port is correct."
-                ),
-            )
+            yield "error", (
+                f"Connection refused to '{input_data.config.smtp_server}' "
+                f"on port {input_data.config.smtp_port}. "
+                "Common SMTP ports are: 587 (TLS), 465 (SSL), 25 (plain). "
+                "Please verify the port is correct."
+            )
         except smtplib.SMTPNotSupportedError:
-            yield (
-                "error",
-                (
-                    f"STARTTLS not supported by server '{input_data.config.smtp_server}'. "
-                    "Try using port 465 for SSL or port 25 for unencrypted connection."
-                ),
-            )
+            yield "error", (
+                f"STARTTLS not supported by server '{input_data.config.smtp_server}'. "
+                "Try using port 465 for SSL or port 25 for unencrypted connection."
+            )
         except ssl.SSLError as e:
-            yield (
-                "error",
-                (
-                    f"SSL/TLS error when connecting to '{input_data.config.smtp_server}': {str(e)}. "
-                    "The server may require a different security protocol."
-                ),
-            )
+            yield "error", (
+                f"SSL/TLS error when connecting to '{input_data.config.smtp_server}': {str(e)}. "
+                "The server may require a different security protocol."
+            )
         except smtplib.SMTPAuthenticationError:
-            yield (
-                "error",
-                (
-                    "Authentication failed. Please verify your username and password are correct."
-                ),
-            )
+            yield "error", (
+                "Authentication failed. Please verify your username and password are correct."
+            )
         except smtplib.SMTPRecipientsRefused:
-            yield (
-                "error",
-                (
-                    f"Recipient email address '{input_data.to_email}' was rejected by the server. "
-                    "Please verify the email address is valid."
-                ),
-            )
+            yield "error", (
+                f"Recipient email address '{input_data.to_email}' was rejected by the server. "
+                "Please verify the email address is valid."
+            )
        except smtplib.SMTPSenderRefused:
-            yield (
-                "error",
-                (
-                    "Sender email address defined in the credentials that where used"
-                    "was rejected by the server. "
-                    "Please verify your account is authorized to send emails."
-                ),
-            )
+            yield "error", (
+                "Sender email address defined in the credentials that where used"
+                "was rejected by the server. "
+                "Please verify your account is authorized to send emails."
+            )
         except smtplib.SMTPDataError as e:
             yield "error", f"Email data rejected by server: {str(e)}"

@@ -490,9 +490,7 @@ class GetLinkedinProfilePictureBlock(Block):
             ],
             test_credentials=TEST_CREDENTIALS,
             test_mock={
-                "_get_profile_picture": lambda *args, **kwargs: (
-                    "https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk"
-                ),
+                "_get_profile_picture": lambda *args, **kwargs: "https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk",
             },
         )

@@ -319,7 +319,7 @@ class CostDollars(BaseModel):

 # Helper functions for payload processing
 def process_text_field(
-    text: Union[bool, TextEnabled, TextDisabled, TextAdvanced, None],
+    text: Union[bool, TextEnabled, TextDisabled, TextAdvanced, None]
 ) -> Optional[Union[bool, Dict[str, Any]]]:
     """Process text field for API payload."""
     if text is None:

@@ -400,7 +400,7 @@ def process_contents_settings(contents: Optional[ContentSettings]) -> Dict[str,


 def process_context_field(
-    context: Union[bool, dict, ContextEnabled, ContextDisabled, ContextAdvanced, None],
+    context: Union[bool, dict, ContextEnabled, ContextDisabled, ContextAdvanced, None]
 ) -> Optional[Union[bool, Dict[str, int]]]:
     """Process context field for API payload."""
     if context is None:

@@ -566,9 +566,8 @@ class ExaUpdateWebsetBlock(Block):
         yield "status", status_str
         yield "external_id", sdk_webset.external_id
         yield "metadata", sdk_webset.metadata or {}
-        yield (
-            "updated_at",
-            (sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else ""),
-        )
+        yield "updated_at", (
+            sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else ""
+        )

@@ -707,13 +706,11 @@ class ExaGetWebsetBlock(Block):
         yield "enrichments", enrichments_data
         yield "monitors", monitors_data
         yield "metadata", sdk_webset.metadata or {}
-        yield (
-            "created_at",
-            (sdk_webset.created_at.isoformat() if sdk_webset.created_at else ""),
-        )
-        yield (
-            "updated_at",
-            (sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else ""),
-        )
+        yield "created_at", (
+            sdk_webset.created_at.isoformat() if sdk_webset.created_at else ""
+        )
+        yield "updated_at", (
+            sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else ""
+        )


@@ -1267,9 +1264,7 @@ class ExaWebsetSummaryBlock(Block):
                 (
                     e.format.value
                    if e.format and hasattr(e.format, "value")
-                    else str(e.format)
-                    if e.format
-                    else "text"
+                    else str(e.format) if e.format else "text"
                 )
                 for e in enrichments
             )

@@ -523,20 +523,16 @@ class ExaWaitForEnrichmentBlock(Block):
         items_enriched = 0

         if input_data.sample_results and status == "completed":
-            (
-                sample_data,
-                items_enriched,
-            ) = await self._get_sample_enrichments(
-                input_data.webset_id, input_data.enrichment_id, aexa
-            )
+            sample_data, items_enriched = (
+                await self._get_sample_enrichments(
+                    input_data.webset_id, input_data.enrichment_id, aexa
+                )
+            )

         yield "enrichment_id", input_data.enrichment_id
         yield "final_status", status
         yield "items_enriched", items_enriched
-        yield (
-            "enrichment_title",
-            enrichment.title or enrichment.description or "",
-        )
+        yield "enrichment_title", enrichment.title or enrichment.description or ""
         yield "elapsed_time", elapsed
         if input_data.sample_results:
             yield "sample_data", sample_data

@@ -127,9 +127,7 @@ class AIImageEditorBlock(Block):
             ],
             test_mock={
                 # Use data URI to avoid HTTP requests during tests
-                "run_model": lambda *args, **kwargs: (
-                    "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
-                ),
+                "run_model": lambda *args, **kwargs: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==",
             },
             test_credentials=TEST_CREDENTIALS,
         )

@@ -798,9 +798,7 @@ class GithubUnassignIssueBlock(Block):
             test_credentials=TEST_CREDENTIALS,
             test_output=[("status", "Issue unassigned successfully")],
             test_mock={
-                "unassign_issue": lambda *args, **kwargs: (
-                    "Issue unassigned successfully"
-                )
+                "unassign_issue": lambda *args, **kwargs: "Issue unassigned successfully"
             },
         )

@@ -261,9 +261,7 @@ class GithubReadPullRequestBlock(Block):
                     "This is the body of the pull request.",
                     "username",
                 ),
-                "read_pr_changes": lambda *args, **kwargs: (
-                    "List of changes made in the pull request."
-                ),
+                "read_pr_changes": lambda *args, **kwargs: "List of changes made in the pull request.",
             },
         )

@@ -367,9 +365,7 @@ class GithubAssignPRReviewerBlock(Block):
             test_credentials=TEST_CREDENTIALS,
             test_output=[("status", "Reviewer assigned successfully")],
             test_mock={
-                "assign_reviewer": lambda *args, **kwargs: (
-                    "Reviewer assigned successfully"
-                )
+                "assign_reviewer": lambda *args, **kwargs: "Reviewer assigned successfully"
             },
         )

@@ -436,9 +432,7 @@ class GithubUnassignPRReviewerBlock(Block):
             test_credentials=TEST_CREDENTIALS,
             test_output=[("status", "Reviewer unassigned successfully")],
             test_mock={
-                "unassign_reviewer": lambda *args, **kwargs: (
-                    "Reviewer unassigned successfully"
-                )
+                "unassign_reviewer": lambda *args, **kwargs: "Reviewer unassigned successfully"
             },
         )

@@ -341,17 +341,14 @@ class GoogleDocsCreateBlock(Block):
         )
         doc_id = result["document_id"]
         doc_url = result["document_url"]
-        yield (
-            "document",
-            GoogleDriveFile(
-                id=doc_id,
-                name=input_data.title,
-                mimeType="application/vnd.google-apps.document",
-                url=doc_url,
-                iconUrl="https://www.gstatic.com/images/branding/product/1x/docs_48dp.png",
-                isFolder=False,
-                _credentials_id=input_data.credentials.id,
-            ),
-        )
+        yield "document", GoogleDriveFile(
+            id=doc_id,
+            name=input_data.title,
+            mimeType="application/vnd.google-apps.document",
+            url=doc_url,
+            iconUrl="https://www.gstatic.com/images/branding/product/1x/docs_48dp.png",
+            isFolder=False,
+            _credentials_id=input_data.credentials.id,
+        )
         yield "document_id", doc_id
         yield "document_url", doc_url

@@ -818,10 +815,7 @@ class GoogleDocsGetMetadataBlock(Block):
             yield "title", result["title"]
             yield "document_id", input_data.document.id
             yield "revision_id", result["revision_id"]
-            yield (
-                "document_url",
-                f"https://docs.google.com/document/d/{input_data.document.id}/edit",
-            )
+            yield "document_url", f"https://docs.google.com/document/d/{input_data.document.id}/edit"
             yield "document", _make_document_output(input_data.document)
         except Exception as e:
             yield "error", f"Failed to get metadata: {str(e)}"

@@ -278,13 +278,11 @@ class GmailBase(Block, ABC):
         """Download attachment content when email body is stored as attachment."""
         try:
             attachment = await asyncio.to_thread(
-                lambda: (
-                    service.users()
-                    .messages()
-                    .attachments()
-                    .get(userId="me", messageId=msg_id, id=attachment_id)
-                    .execute()
-                )
+                lambda: service.users()
+                .messages()
+                .attachments()
+                .get(userId="me", messageId=msg_id, id=attachment_id)
+                .execute()
             )
             return attachment.get("data")
         except Exception:

@@ -306,13 +304,11 @@ class GmailBase(Block, ABC):

     async def download_attachment(self, service, message_id: str, attachment_id: str):
         attachment = await asyncio.to_thread(
-            lambda: (
-                service.users()
-                .messages()
-                .attachments()
-                .get(userId="me", messageId=message_id, id=attachment_id)
-                .execute()
-            )
+            lambda: service.users()
+            .messages()
+            .attachments()
+            .get(userId="me", messageId=message_id, id=attachment_id)
+            .execute()
         )
         file_data = base64.urlsafe_b64decode(attachment["data"].encode("UTF-8"))
         return file_data

@@ -470,12 +466,10 @@ class GmailReadBlock(GmailBase):
                 else "full"
             )
             msg = await asyncio.to_thread(
-                lambda: (
-                    service.users()
-                    .messages()
-                    .get(userId="me", id=message["id"], format=format_type)
-                    .execute()
-                )
+                lambda: service.users()
+                .messages()
+                .get(userId="me", id=message["id"], format=format_type)
+                .execute()
             )

             headers = {

@@ -608,12 +602,10 @@ class GmailSendBlock(GmailBase):
|
||||
)
|
||||
raw_message = await create_mime_message(input_data, execution_context)
|
||||
sent_message = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.messages()
|
||||
.send(userId="me", body={"raw": raw_message})
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.messages()
|
||||
.send(userId="me", body={"raw": raw_message})
|
||||
.execute()
|
||||
)
|
||||
return {"id": sent_message["id"], "status": "sent"}
|
||||
|
||||
@@ -707,13 +699,8 @@ class GmailCreateDraftBlock(GmailBase):
|
||||
input_data,
|
||||
execution_context,
|
||||
)
|
||||
yield (
|
||||
"result",
|
||||
GmailDraftResult(
|
||||
id=result["id"],
|
||||
message_id=result["message"]["id"],
|
||||
status="draft_created",
|
||||
),
|
||||
yield "result", GmailDraftResult(
|
||||
id=result["id"], message_id=result["message"]["id"], status="draft_created"
|
||||
)
|
||||
|
||||
async def _create_draft(
|
||||
@@ -726,12 +713,10 @@ class GmailCreateDraftBlock(GmailBase):
|
||||
|
||||
raw_message = await create_mime_message(input_data, execution_context)
|
||||
draft = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.drafts()
|
||||
.create(userId="me", body={"message": {"raw": raw_message}})
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.drafts()
|
||||
.create(userId="me", body={"message": {"raw": raw_message}})
|
||||
.execute()
|
||||
)
|
||||
|
||||
return draft
|
||||
@@ -855,12 +840,10 @@ class GmailAddLabelBlock(GmailBase):
|
||||
async def _add_label(self, service, message_id: str, label_name: str) -> dict:
|
||||
label_id = await self._get_or_create_label(service, label_name)
|
||||
result = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.messages()
|
||||
.modify(userId="me", id=message_id, body={"addLabelIds": [label_id]})
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.messages()
|
||||
.modify(userId="me", id=message_id, body={"addLabelIds": [label_id]})
|
||||
.execute()
|
||||
)
|
||||
if not result.get("labelIds"):
|
||||
return {
|
||||
@@ -874,12 +857,10 @@ class GmailAddLabelBlock(GmailBase):
|
||||
label_id = await self._get_label_id(service, label_name)
|
||||
if not label_id:
|
||||
label = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.labels()
|
||||
.create(userId="me", body={"name": label_name})
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.labels()
|
||||
.create(userId="me", body={"name": label_name})
|
||||
.execute()
|
||||
)
|
||||
label_id = label["id"]
|
||||
return label_id
|
||||
@@ -946,14 +927,10 @@ class GmailRemoveLabelBlock(GmailBase):
|
||||
label_id = await self._get_label_id(service, label_name)
|
||||
if label_id:
|
||||
result = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.messages()
|
||||
.modify(
|
||||
userId="me", id=message_id, body={"removeLabelIds": [label_id]}
|
||||
)
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.messages()
|
||||
.modify(userId="me", id=message_id, body={"removeLabelIds": [label_id]})
|
||||
.execute()
|
||||
)
|
||||
if not result.get("labelIds"):
|
||||
return {
|
||||
@@ -1071,12 +1048,10 @@ class GmailGetThreadBlock(GmailBase):
|
||||
else "full"
|
||||
)
|
||||
thread = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.threads()
|
||||
.get(userId="me", id=thread_id, format=format_type)
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.threads()
|
||||
.get(userId="me", id=thread_id, format=format_type)
|
||||
.execute()
|
||||
)
|
||||
|
||||
parsed_messages = []
|
||||
@@ -1131,25 +1106,23 @@ async def _build_reply_message(
|
||||
"""
|
||||
# Get parent message for reply context
|
||||
parent = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.messages()
|
||||
.get(
|
||||
userId="me",
|
||||
id=input_data.parentMessageId,
|
||||
format="metadata",
|
||||
metadataHeaders=[
|
||||
"Subject",
|
||||
"References",
|
||||
"Message-ID",
|
||||
"From",
|
||||
"To",
|
||||
"Cc",
|
||||
"Reply-To",
|
||||
],
|
||||
)
|
||||
.execute()
|
||||
lambda: service.users()
|
||||
.messages()
|
||||
.get(
|
||||
userId="me",
|
||||
id=input_data.parentMessageId,
|
||||
format="metadata",
|
||||
metadataHeaders=[
|
||||
"Subject",
|
||||
"References",
|
||||
"Message-ID",
|
||||
"From",
|
||||
"To",
|
||||
"Cc",
|
||||
"Reply-To",
|
||||
],
|
||||
)
|
||||
.execute()
|
||||
)
|
||||
|
||||
# Build headers dictionary, preserving all values for duplicate headers
|
||||
@@ -1373,12 +1346,10 @@ class GmailReplyBlock(GmailBase):
|
||||
|
||||
# Send the message
|
||||
return await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.messages()
|
||||
.send(userId="me", body={"threadId": thread_id, "raw": raw})
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.messages()
|
||||
.send(userId="me", body={"threadId": thread_id, "raw": raw})
|
||||
.execute()
|
||||
)
|
||||
|
||||
|
||||
@@ -1488,20 +1459,18 @@ class GmailDraftReplyBlock(GmailBase):
|
||||
|
||||
# Create draft with proper thread association
|
||||
draft = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.drafts()
|
||||
.create(
|
||||
userId="me",
|
||||
body={
|
||||
"message": {
|
||||
"threadId": thread_id,
|
||||
"raw": raw,
|
||||
}
|
||||
},
|
||||
)
|
||||
.execute()
|
||||
lambda: service.users()
|
||||
.drafts()
|
||||
.create(
|
||||
userId="me",
|
||||
body={
|
||||
"message": {
|
||||
"threadId": thread_id,
|
||||
"raw": raw,
|
||||
}
|
||||
},
|
||||
)
|
||||
.execute()
|
||||
)
|
||||
|
||||
return draft
|
||||
@@ -1673,12 +1642,10 @@ class GmailForwardBlock(GmailBase):
|
||||
|
||||
# Get the original message
|
||||
original = await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.messages()
|
||||
.get(userId="me", id=input_data.messageId, format="full")
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.messages()
|
||||
.get(userId="me", id=input_data.messageId, format="full")
|
||||
.execute()
|
||||
)
|
||||
|
||||
headers = {
|
||||
@@ -1768,10 +1735,8 @@ To: {original_to}
|
||||
# Send the forwarded message
|
||||
raw = base64.urlsafe_b64encode(msg.as_bytes()).decode("utf-8")
|
||||
return await asyncio.to_thread(
|
||||
lambda: (
|
||||
service.users()
|
||||
.messages()
|
||||
.send(userId="me", body={"raw": raw})
|
||||
.execute()
|
||||
)
|
||||
lambda: service.users()
|
||||
.messages()
|
||||
.send(userId="me", body={"raw": raw})
|
||||
.execute()
|
||||
)
|
||||
|
||||
@@ -345,17 +345,14 @@ class GoogleSheetsReadBlock(Block):
)
yield "result", data
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=spreadsheet_id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=spreadsheet_id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", _handle_sheets_api_error(str(e), "read")
@@ -469,12 +466,9 @@ class GoogleSheetsWriteBlock(Block):
if validation_error:
# Customize message for write operations on CSV files
if "CSV file" in validation_error:
yield (
"error",
validation_error.replace(
"Please use a CSV reader block instead, or",
"CSV files are read-only through Google Drive. Please",
),
yield "error", validation_error.replace(
"Please use a CSV reader block instead, or",
"CSV files are read-only through Google Drive. Please",
)
else:
yield "error", validation_error
@@ -491,17 +485,14 @@ class GoogleSheetsWriteBlock(Block):
)
yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", _handle_sheets_api_error(str(e), "write")
@@ -623,17 +614,14 @@ class GoogleSheetsAppendRowBlock(Block):
input_data.value_input_option,
)
yield "result", result
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to append row: {str(e)}"
@@ -756,17 +744,14 @@ class GoogleSheetsClearBlock(Block):
)
yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to clear Google Sheet range: {str(e)}"
@@ -869,17 +854,14 @@ class GoogleSheetsMetadataBlock(Block):
)
yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to get spreadsheet metadata: {str(e)}"
@@ -1002,17 +984,14 @@ class GoogleSheetsManageSheetBlock(Block):
)
yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to manage sheet: {str(e)}"
@@ -1162,17 +1141,14 @@ class GoogleSheetsBatchOperationsBlock(Block):
)
yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to perform batch operations: {str(e)}"
@@ -1330,17 +1306,14 @@ class GoogleSheetsFindReplaceBlock(Block):
)
yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to find/replace in Google Sheet: {str(e)}"
@@ -1515,17 +1488,14 @@ class GoogleSheetsFindBlock(Block):
yield "locations", result["locations"]
yield "result", {"success": True}
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to find text in Google Sheet: {str(e)}"
@@ -1784,17 +1754,14 @@ class GoogleSheetsFormatBlock(Block):
else:
yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to format Google Sheet cells: {str(e)}"
@@ -1961,17 +1928,14 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
spreadsheet_id = result["spreadsheetId"]
spreadsheet_url = result["spreadsheetUrl"]
# Output the GoogleDriveFile for chaining (includes credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=spreadsheet_id,
name=result.get("title", input_data.title),
mimeType="application/vnd.google-apps.spreadsheet",
url=spreadsheet_url,
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.credentials.id, # Preserve credentials for chaining
),
yield "spreadsheet", GoogleDriveFile(
id=spreadsheet_id,
name=result.get("title", input_data.title),
mimeType="application/vnd.google-apps.spreadsheet",
url=spreadsheet_url,
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.credentials.id, # Preserve credentials for chaining
)
yield "spreadsheet_id", spreadsheet_id
yield "spreadsheet_url", spreadsheet_url
@@ -2149,17 +2113,14 @@ class GoogleSheetsUpdateCellBlock(Block):

yield "result", result
# Output the GoogleDriveFile for chaining (preserves credentials_id)
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", _handle_sheets_api_error(str(e), "update")
@@ -2418,17 +2379,14 @@ class GoogleSheetsFilterRowsBlock(Block):
yield "rows", result["rows"]
yield "row_indices", result["row_indices"]
yield "count", result["count"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to filter rows: {str(e)}"
@@ -2638,17 +2596,14 @@ class GoogleSheetsLookupRowBlock(Block):
yield "row_dict", result["row_dict"]
yield "row_index", result["row_index"]
yield "found", result["found"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to lookup row: {str(e)}"
@@ -2862,17 +2817,14 @@ class GoogleSheetsDeleteRowsBlock(Block):
)
yield "result", {"success": True}
yield "deleted_count", result["deleted_count"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to delete rows: {str(e)}"
@@ -3043,17 +2995,14 @@ class GoogleSheetsGetColumnBlock(Block):
yield "values", result["values"]
yield "count", result["count"]
yield "column_index", result["column_index"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to get column: {str(e)}"
@@ -3227,17 +3176,14 @@ class GoogleSheetsSortBlock(Block):
input_data.has_header,
)
yield "result", result
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to sort sheet: {str(e)}"
@@ -3493,17 +3439,14 @@ class GoogleSheetsGetUniqueValuesBlock(Block):
yield "values", result["values"]
yield "counts", result["counts"]
yield "total_unique", result["total_unique"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to get unique values: {str(e)}"
@@ -3677,17 +3620,14 @@ class GoogleSheetsInsertRowBlock(Block):
input_data.value_input_option,
)
yield "result", result
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to insert row: {str(e)}"
@@ -3853,17 +3793,14 @@ class GoogleSheetsAddColumnBlock(Block):
yield "result", {"success": True}
yield "column_letter", result["column_letter"]
yield "column_index", result["column_index"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to add column: {str(e)}"
@@ -4061,17 +3998,14 @@ class GoogleSheetsGetRowCountBlock(Block):
yield "data_rows", result["data_rows"]
yield "last_row", result["last_row"]
yield "column_count", result["column_count"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to get row count: {str(e)}"
@@ -4242,17 +4176,14 @@ class GoogleSheetsRemoveDuplicatesBlock(Block):
yield "result", {"success": True}
yield "removed_count", result["removed_count"]
yield "remaining_rows", result["remaining_rows"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to remove duplicates: {str(e)}"
@@ -4495,17 +4426,14 @@ class GoogleSheetsUpdateRowBlock(Block):
input_data.dict_values,
)
yield "result", result
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to update row: {str(e)}"
@@ -4687,17 +4615,14 @@ class GoogleSheetsGetRowBlock(Block):
)
yield "row", result["row"]
yield "row_dict", result["row_dict"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to get row: {str(e)}"
@@ -4828,17 +4753,14 @@ class GoogleSheetsDeleteColumnBlock(Block):
input_data.column,
)
yield "result", result
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to delete column: {str(e)}"
@@ -5009,17 +4931,14 @@ class GoogleSheetsCreateNamedRangeBlock(Block):
)
yield "result", {"success": True}
yield "named_range_id", result["named_range_id"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to create named range: {str(e)}"
@@ -5185,17 +5104,14 @@ class GoogleSheetsListNamedRangesBlock(Block):
)
yield "named_ranges", result["named_ranges"]
yield "count", result["count"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to list named ranges: {str(e)}"
@@ -5348,17 +5264,14 @@ class GoogleSheetsAddDropdownBlock(Block):
input_data.show_dropdown,
)
yield "result", result
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to add dropdown: {str(e)}"
@@ -5523,17 +5436,14 @@ class GoogleSheetsCopyToSpreadsheetBlock(Block):
yield "result", {"success": True}
yield "new_sheet_id", result["new_sheet_id"]
yield "new_sheet_name", result["new_sheet_name"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.source_spreadsheet.id,
name=input_data.source_spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.source_spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.source_spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.source_spreadsheet.id,
name=input_data.source_spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.source_spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.source_spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to copy sheet: {str(e)}"
@@ -5678,17 +5588,14 @@ class GoogleSheetsProtectRangeBlock(Block):
)
yield "result", {"success": True}
yield "protection_id", result["protection_id"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to protect range: {str(e)}"
@@ -5845,17 +5752,14 @@ class GoogleSheetsExportCsvBlock(Block):
)
yield "csv_data", result["csv_data"]
yield "row_count", result["row_count"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to export CSV: {str(e)}"
@@ -5991,17 +5895,14 @@ class GoogleSheetsImportCsvBlock(Block):
)
yield "result", {"success": True}
yield "rows_imported", result["rows_imported"]
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to import CSV: {str(e)}"
@@ -6131,17 +6032,14 @@ class GoogleSheetsAddNoteBlock(Block):
input_data.note,
)
yield "result", {"success": True}
yield (
"spreadsheet",
GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
),
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
mimeType="application/vnd.google-apps.spreadsheet",
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
_credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to add note: {str(e)}"
@@ -6287,17 +6185,14 @@ class GoogleSheetsGetNotesBlock(Block):
|
||||
notes = result["notes"]
|
||||
yield "notes", notes
|
||||
yield "count", len(notes)
|
||||
yield (
|
||||
"spreadsheet",
|
||||
GoogleDriveFile(
|
||||
id=input_data.spreadsheet.id,
|
||||
name=input_data.spreadsheet.name,
|
||||
mimeType="application/vnd.google-apps.spreadsheet",
|
||||
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
|
||||
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
|
||||
isFolder=False,
|
||||
_credentials_id=input_data.spreadsheet.credentials_id,
|
||||
),
|
||||
yield "spreadsheet", GoogleDriveFile(
|
||||
id=input_data.spreadsheet.id,
|
||||
name=input_data.spreadsheet.name,
|
||||
mimeType="application/vnd.google-apps.spreadsheet",
|
||||
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
|
||||
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
|
||||
isFolder=False,
|
||||
_credentials_id=input_data.spreadsheet.credentials_id,
|
||||
)
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to get notes: {str(e)}"
|
||||
@@ -6452,17 +6347,14 @@ class GoogleSheetsShareSpreadsheetBlock(Block):
|
||||
)
|
||||
yield "result", {"success": True}
|
||||
yield "share_link", result["share_link"]
|
||||
yield (
|
||||
"spreadsheet",
|
||||
GoogleDriveFile(
|
||||
id=input_data.spreadsheet.id,
|
||||
name=input_data.spreadsheet.name,
|
||||
mimeType="application/vnd.google-apps.spreadsheet",
|
||||
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
|
||||
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
|
||||
isFolder=False,
|
||||
_credentials_id=input_data.spreadsheet.credentials_id,
|
||||
),
|
||||
yield "spreadsheet", GoogleDriveFile(
|
||||
id=input_data.spreadsheet.id,
|
||||
name=input_data.spreadsheet.name,
|
||||
mimeType="application/vnd.google-apps.spreadsheet",
|
||||
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
|
||||
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
|
||||
isFolder=False,
|
||||
_credentials_id=input_data.spreadsheet.credentials_id,
|
||||
)
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to share spreadsheet: {str(e)}"
|
||||
@@ -6599,17 +6491,14 @@ class GoogleSheetsSetPublicAccessBlock(Block):
|
||||
)
|
||||
yield "result", {"success": True, "is_public": result["is_public"]}
|
||||
yield "share_link", result["share_link"]
|
||||
yield (
|
||||
"spreadsheet",
|
||||
GoogleDriveFile(
|
||||
id=input_data.spreadsheet.id,
|
||||
name=input_data.spreadsheet.name,
|
||||
mimeType="application/vnd.google-apps.spreadsheet",
|
||||
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
|
||||
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
|
||||
isFolder=False,
|
||||
_credentials_id=input_data.spreadsheet.credentials_id,
|
||||
),
|
||||
yield "spreadsheet", GoogleDriveFile(
|
||||
id=input_data.spreadsheet.id,
|
||||
name=input_data.spreadsheet.name,
|
||||
mimeType="application/vnd.google-apps.spreadsheet",
|
||||
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
|
||||
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
|
||||
isFolder=False,
|
||||
_credentials_id=input_data.spreadsheet.credentials_id,
|
||||
)
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to set public access: {str(e)}"
|
||||
|
||||
@@ -195,12 +195,8 @@ class IdeogramModelBlock(Block):
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name, custom_colors: (
|
||||
"https://ideogram.ai/api/images/test-generated-image-url.png"
|
||||
),
|
||||
"upscale_image": lambda api_key, image_url: (
|
||||
"https://ideogram.ai/api/images/test-upscaled-image-url.png"
|
||||
),
|
||||
"run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name, custom_colors: "https://ideogram.ai/api/images/test-generated-image-url.png",
|
||||
"upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png",
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
)
|
||||
|
||||
@@ -210,11 +210,8 @@ class AgentOutputBlock(Block):
|
||||
if input_data.format:
|
||||
try:
|
||||
formatter = TextFormatter(autoescape=input_data.escape_html)
|
||||
yield (
|
||||
"output",
|
||||
formatter.format_string(
|
||||
input_data.format, {input_data.name: input_data.value}
|
||||
),
|
||||
yield "output", formatter.format_string(
|
||||
input_data.format, {input_data.name: input_data.value}
|
||||
)
|
||||
except Exception as e:
|
||||
yield "output", f"Error: {e}, {input_data.value}"
|
||||
@@ -477,13 +474,10 @@ class AgentFileInputBlock(AgentInputBlock):
|
||||
# for_block_output: smart format - workspace:// in CoPilot, data URI in graphs
|
||||
return_format = "for_external_api" if input_data.base_64 else "for_block_output"
|
||||
|
||||
yield (
|
||||
"result",
|
||||
await store_media_file(
|
||||
file=input_data.value,
|
||||
execution_context=execution_context,
|
||||
return_format=return_format,
|
||||
),
|
||||
yield "result", await store_media_file(
|
||||
file=input_data.value,
|
||||
execution_context=execution_context,
|
||||
return_format=return_format,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -75,6 +75,7 @@ class LinearClient:
|
||||
|
||||
response_data = response.json()
|
||||
if "errors" in response_data:
|
||||
|
||||
error_messages = [
|
||||
error.get("message", "") for error in response_data["errors"]
|
||||
]
|
||||
|
||||
@@ -692,6 +692,7 @@ async def llm_call(
|
||||
reasoning=reasoning,
|
||||
)
|
||||
elif provider == "anthropic":
|
||||
|
||||
an_tools = convert_openai_tool_fmt_to_anthropic(tools)
|
||||
|
||||
system_messages = [p["content"] for p in prompt if p["role"] == "system"]
|
||||
|
||||
@@ -75,14 +75,11 @@ class PersistInformationBlock(Block):
|
||||
storage_key = get_storage_key(input_data.key, input_data.scope, graph_id)
|
||||
|
||||
# Store the data
|
||||
yield (
|
||||
"value",
|
||||
await self._store_data(
|
||||
user_id=user_id,
|
||||
node_exec_id=node_exec_id,
|
||||
key=storage_key,
|
||||
data=input_data.value,
|
||||
),
|
||||
yield "value", await self._store_data(
|
||||
user_id=user_id,
|
||||
node_exec_id=node_exec_id,
|
||||
key=storage_key,
|
||||
data=input_data.value,
|
||||
)
|
||||
|
||||
async def _store_data(
|
||||
|
||||
@@ -160,13 +160,10 @@ class PineconeQueryBlock(Block):
combined_text = "\n\n".join(texts)

# Return both the raw matches and combined text
yield (
"results",
{
"matches": results["matches"],
"combined_text": combined_text,
},
)
yield "results", {
"matches": results["matches"],
"combined_text": combined_text,
}
yield "combined_results", combined_text

except Exception as e:

@@ -309,13 +309,10 @@ class PostRedditCommentBlock(Block):
async def run(
self, input_data: Input, *, credentials: RedditCredentials, **kwargs
) -> BlockOutput:
yield (
"comment_id",
self.reply_post(
credentials,
post_id=input_data.post_id,
comment=input_data.comment,
),
yield "comment_id", self.reply_post(
credentials,
post_id=input_data.post_id,
comment=input_data.comment,
)
yield "post_id", input_data.post_id


@@ -141,9 +141,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
),
],
test_mock={
"run_model": lambda api_key, model_name, prompt, seed, steps, guidance, interval, aspect_ratio, output_format, output_quality, safety_tolerance: (
"https://replicate.com/output/generated-image-url.jpg"
),
"run_model": lambda api_key, model_name, prompt, seed, steps, guidance, interval, aspect_ratio, output_format, output_quality, safety_tolerance: "https://replicate.com/output/generated-image-url.jpg",
},
test_credentials=TEST_CREDENTIALS,
)

@@ -48,7 +48,7 @@ class Slant3DBlockBase(Block):
raise ValueError(
f"""Invalid color profile combination {color_tag}.
Valid colors for {profile.value} are:
{",".join([filament["colorTag"].replace(profile.value.lower(), "") for filament in response["filaments"] if filament["profile"] == profile.value])}
{','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])}
"""
)
return color_tag

@@ -933,10 +933,7 @@ class SmartDecisionMakerBlock(Block):
credentials, input_data, iteration_prompt, tool_functions
)
except Exception as e:
yield (
"error",
f"LLM call failed in agent mode iteration {iteration}: {str(e)}",
)
yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}"
return

# Process tool calls
@@ -976,10 +973,7 @@ class SmartDecisionMakerBlock(Block):
if max_iterations < 0:
yield "finished", f"Agent mode completed after {iteration} iterations"
else:
yield (
"finished",
f"Agent mode completed after {max_iterations} iterations (limit reached)",
)
yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)"
yield "conversations", current_prompt

async def run(

@@ -180,22 +180,20 @@ class AddLeadToCampaignBlock(Block):
),
],
test_mock={
"add_leads_to_campaign": lambda campaign_id, lead_list, credentials: (
AddLeadsToCampaignResponse(
ok=True,
upload_count=1,
already_added_to_campaign=0,
duplicate_count=0,
invalid_email_count=0,
is_lead_limit_exhausted=False,
lead_import_stopped_count=0,
error="",
total_leads=1,
block_count=0,
invalid_emails=[],
unsubscribed_leads=[],
bounce_count=0,
)
"add_leads_to_campaign": lambda campaign_id, lead_list, credentials: AddLeadsToCampaignResponse(
ok=True,
upload_count=1,
already_added_to_campaign=0,
duplicate_count=0,
invalid_email_count=0,
is_lead_limit_exhausted=False,
lead_import_stopped_count=0,
error="",
total_leads=1,
block_count=0,
invalid_emails=[],
unsubscribed_leads=[],
bounce_count=0,
)
},
)
@@ -297,11 +295,9 @@ class SaveCampaignSequencesBlock(Block):
),
],
test_mock={
"save_campaign_sequences": lambda campaign_id, sequences, credentials: (
SaveSequencesResponse(
ok=True,
message="Sequences saved successfully",
)
"save_campaign_sequences": lambda campaign_id, sequences, credentials: SaveSequencesResponse(
ok=True,
message="Sequences saved successfully",
)
},
)

@@ -97,9 +97,9 @@ class StagehandRecommendedLlmModel(str, Enum):
if len(model_name.split("/")) == 1 and not self.value.startswith(
model_metadata.provider
):
assert model_metadata.provider != "open_router", (
"Logic failed and open_router provider attempted to be prepended to model name! in stagehand/_config.py"
)
assert (
model_metadata.provider != "open_router"
), "Logic failed and open_router provider attempted to be prepended to model name! in stagehand/_config.py"
model_name = f"{model_metadata.provider}/{model_name}"

logger.error(f"Model name: {model_name}")

@@ -128,9 +128,9 @@ async def test_block_ids_valid(block: Type[Block]):
try:
parsed_uuid = uuid.UUID(block_instance.id)
# Verify it's specifically UUID version 4
assert parsed_uuid.version == 4, (
f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
)
assert (
parsed_uuid.version == 4
), f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
except ValueError:
pytest.fail(f"Block {block.name} has invalid UUID format: {block_instance.id}")


@@ -174,9 +174,9 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
)
assert tool_functions is not None, "Tool functions should not be None"

assert len(tool_functions) == 2, (
f"Expected 2 tool functions, got {len(tool_functions)}"
)
assert (
len(tool_functions) == 2
), f"Expected 2 tool functions, got {len(tool_functions)}"

# Check the first tool function (testgraph)
assert tool_functions[0]["type"] == "function"
@@ -219,19 +219,17 @@ async def test_smart_decision_maker_tracks_llm_stats():

# Mock the _create_tool_node_signatures method to avoid database calls

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response,
),
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=[],
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response,
), patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=[],
):

# Create test input
input_data = SmartDecisionMakerBlock.Input(
prompt="Should I continue with this task?",
@@ -324,19 +322,17 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_with_typo.reasoning = None
mock_response_with_typo.raw_response = {"role": "assistant", "content": None}

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_with_typo,
) as mock_llm_call,
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_with_typo,
) as mock_llm_call, patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
):

input_data = SmartDecisionMakerBlock.Input(
prompt="Search for keywords",
model=llm_module.DEFAULT_LLM_MODEL,
@@ -393,19 +389,17 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_missing_required.reasoning = None
mock_response_missing_required.raw_response = {"role": "assistant", "content": None}

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_missing_required,
),
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_missing_required,
), patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
):

input_data = SmartDecisionMakerBlock.Input(
prompt="Search for keywords",
model=llm_module.DEFAULT_LLM_MODEL,
@@ -455,19 +449,17 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_valid.reasoning = None
mock_response_valid.raw_response = {"role": "assistant", "content": None}

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_valid,
),
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_valid,
), patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
):

input_data = SmartDecisionMakerBlock.Input(
prompt="Search for keywords",
model=llm_module.DEFAULT_LLM_MODEL,
@@ -521,19 +513,17 @@ async def test_smart_decision_maker_parameter_validation():
mock_response_all_params.reasoning = None
mock_response_all_params.raw_response = {"role": "assistant", "content": None}

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_all_params,
),
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_all_params,
), patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
):

input_data = SmartDecisionMakerBlock.Input(
prompt="Search for keywords",
model=llm_module.DEFAULT_LLM_MODEL,
@@ -644,14 +634,13 @@ async def test_smart_decision_maker_raw_response_conversion():

# Mock llm_call to return different responses on different calls

with (
patch("backend.blocks.llm.llm_call", new_callable=AsyncMock) as mock_llm_call,
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
),
with patch(
"backend.blocks.llm.llm_call", new_callable=AsyncMock
) as mock_llm_call, patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=mock_tool_functions,
):
# First call returns response that will trigger retry due to validation error
# Second call returns successful response
@@ -721,18 +710,15 @@ async def test_smart_decision_maker_raw_response_conversion():
"I'll help you with that." # Ollama returns string
)

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_ollama,
),
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=[], # No tools for this test
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_ollama,
), patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=[], # No tools for this test
):
input_data = SmartDecisionMakerBlock.Input(
prompt="Simple prompt",
@@ -780,18 +766,15 @@ async def test_smart_decision_maker_raw_response_conversion():
"content": "Test response",
} # Dict format

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_dict,
),
patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=[],
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response_dict,
), patch.object(
SmartDecisionMakerBlock,
"_create_tool_node_signatures",
new_callable=AsyncMock,
return_value=[],
):
input_data = SmartDecisionMakerBlock.Input(
prompt="Another test",
@@ -907,21 +890,18 @@ async def test_smart_decision_maker_agent_mode():

# No longer need mock_execute_node since we use execution_processor.on_node_execution

with (
patch("backend.blocks.llm.llm_call", llm_call_mock),
patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
),
patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client",
return_value=mock_db_client,
),
patch(
"backend.executor.manager.async_update_node_execution_status",
new_callable=AsyncMock,
),
patch("backend.integrations.creds_manager.IntegrationCredentialsManager"),
with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
), patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client",
return_value=mock_db_client,
), patch(
"backend.executor.manager.async_update_node_execution_status",
new_callable=AsyncMock,
), patch(
"backend.integrations.creds_manager.IntegrationCredentialsManager"
):

# Create a mock execution context

mock_execution_context = ExecutionContext(
@@ -1029,16 +1009,14 @@ async def test_smart_decision_maker_traditional_mode_default():
}
]

with (
patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response,
),
patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
),
with patch(
"backend.blocks.llm.llm_call",
new_callable=AsyncMock,
return_value=mock_response,
), patch.object(
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
):

# Test default behavior (traditional mode)
input_data = SmartDecisionMakerBlock.Input(
prompt="Test prompt",

@@ -41,8 +41,7 @@ async def test_smart_decision_maker_handles_dynamic_dict_fields():

# Generate function signature
signature = await SmartDecisionMakerBlock._create_block_function_signature(
mock_node,
mock_links, # type: ignore
mock_node, mock_links # type: ignore
)

# Verify the signature was created successfully
@@ -99,8 +98,7 @@ async def test_smart_decision_maker_handles_dynamic_list_fields():

# Generate function signature
signature = await SmartDecisionMakerBlock._create_block_function_signature(
mock_node,
mock_links, # type: ignore
mock_node, mock_links # type: ignore
)

# Verify dynamic list fields are handled properly

@@ -314,14 +314,11 @@ async def test_output_yielding_with_dynamic_fields():
mock_llm.return_value = mock_response

# Mock the database manager to avoid HTTP calls during tool execution
with (
patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client"
) as mock_db_manager,
patch.object(
block, "_create_tool_node_signatures", new_callable=AsyncMock
) as mock_sig,
):
with patch(
"backend.blocks.smart_decision_maker.get_database_manager_async_client"
) as mock_db_manager, patch.object(
block, "_create_tool_node_signatures", new_callable=AsyncMock
) as mock_sig:
# Set up the mock database manager
mock_db_client = AsyncMock()
mock_db_manager.return_value = mock_db_client
@@ -670,6 +667,6 @@ async def test_validation_errors_dont_pollute_conversation():
if msg.get("role") == "user"
and "parameter errors" in msg.get("content", "")
]
assert len(error_messages) == 0, (
"Validation error leaked into final conversation"
)
assert (
len(error_messages) == 0
), "Validation error leaked into final conversation"

@@ -275,30 +275,24 @@ class GetCurrentDateBlock(Block):
test_output=[
(
"date",
lambda t: (
abs(
datetime.now().date()
- datetime.strptime(t, "%Y-%m-%d").date()
)
<= timedelta(days=8)
), # 7 days difference + 1 day error margin.
lambda t: abs(
datetime.now().date() - datetime.strptime(t, "%Y-%m-%d").date()
)
<= timedelta(days=8), # 7 days difference + 1 day error margin.
),
(
"date",
lambda t: (
abs(
datetime.now().date()
- datetime.strptime(t, "%m/%d/%Y").date()
)
<= timedelta(days=8)
),
lambda t: abs(
datetime.now().date() - datetime.strptime(t, "%m/%d/%Y").date()
)
<= timedelta(days=8),
# 7 days difference + 1 day error margin.
),
(
"date",
lambda t: (
len(t) == 10 and t[4] == "-" and t[7] == "-"
), # ISO date format YYYY-MM-DD
lambda t: len(t) == 10
and t[4] == "-"
and t[7] == "-", # ISO date format YYYY-MM-DD
),
],
)
@@ -386,32 +380,25 @@ class GetCurrentDateAndTimeBlock(Block):
test_output=[
(
"date_time",
lambda t: (
abs(
datetime.now(tz=ZoneInfo("UTC"))
- datetime.strptime(t + "+00:00", "%Y-%m-%d %H:%M:%S%z")
)
< timedelta(seconds=10)
), # 10 seconds error margin.
lambda t: abs(
datetime.now(tz=ZoneInfo("UTC"))
- datetime.strptime(t + "+00:00", "%Y-%m-%d %H:%M:%S%z")
)
< timedelta(seconds=10), # 10 seconds error margin.
),
(
"date_time",
lambda t: (
abs(
datetime.now().date()
- datetime.strptime(t, "%Y/%m/%d").date()
)
<= timedelta(days=1)
), # Date format only, no time component
lambda t: abs(
datetime.now().date() - datetime.strptime(t, "%Y/%m/%d").date()
)
<= timedelta(days=1), # Date format only, no time component
),
(
"date_time",
lambda t: (
abs(
datetime.now(tz=ZoneInfo("UTC")) - datetime.fromisoformat(t)
)
< timedelta(seconds=10)
), # 10 seconds error margin for ISO format.
lambda t: abs(
datetime.now(tz=ZoneInfo("UTC")) - datetime.fromisoformat(t)
)
< timedelta(seconds=10), # 10 seconds error margin for ISO format.
),
],
)

@@ -160,7 +160,7 @@ class TodoistCreateProjectBlock(Block):
test_input={"credentials": TEST_CREDENTIALS_INPUT, "name": "Test Project"},
test_credentials=TEST_CREDENTIALS,
test_output=[("success", True)],
test_mock={"create_project": lambda *args, **kwargs: True},
test_mock={"create_project": lambda *args, **kwargs: (True)},
)

@staticmethod
@@ -346,7 +346,7 @@ class TodoistUpdateProjectBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[("success", True)],
test_mock={"update_project": lambda *args, **kwargs: True},
test_mock={"update_project": lambda *args, **kwargs: (True)},
)

@staticmethod
@@ -426,7 +426,7 @@ class TodoistDeleteProjectBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[("success", True)],
test_mock={"delete_project": lambda *args, **kwargs: True},
test_mock={"delete_project": lambda *args, **kwargs: (True)},
)

@staticmethod

@@ -285,7 +285,7 @@ class TodoistDeleteSectionBlock(Block):
test_input={"credentials": TEST_CREDENTIALS_INPUT, "section_id": "7025"},
test_credentials=TEST_CREDENTIALS,
test_output=[("success", True)],
test_mock={"delete_section": lambda *args, **kwargs: True},
test_mock={"delete_section": lambda *args, **kwargs: (True)},
)

@staticmethod

@@ -580,7 +580,7 @@ class TodoistReopenTaskBlock(Block):
test_output=[
("success", True),
],
test_mock={"reopen_task": lambda *args, **kwargs: True},
test_mock={"reopen_task": lambda *args, **kwargs: (True)},
)

@staticmethod
@@ -632,7 +632,7 @@ class TodoistDeleteTaskBlock(Block):
test_output=[
("success", True),
],
test_mock={"delete_task": lambda *args, **kwargs: True},
test_mock={"delete_task": lambda *args, **kwargs: (True)},
)

@staticmethod

@@ -256,6 +256,7 @@ class ListFieldsFilter(BaseModel):

# --------- [Input Types] -------------
class TweetExpansionInputs(BlockSchemaInput):

expansions: ExpansionFilter | None = SchemaField(
description="Choose what extra information you want to get with your tweets. For example:\n- Select 'Media_Keys' to get media details\n- Select 'Author_User_ID' to get user information\n- Select 'Place_ID' to get location details",
placeholder="Pick the extra information you want to see",

@@ -232,7 +232,7 @@ class TwitterCreateListBlock(Block):
("list_id", "1234567890"),
("url", "https://twitter.com/i/lists/1234567890"),
],
test_mock={"create_list": lambda *args, **kwargs: "1234567890"},
test_mock={"create_list": lambda *args, **kwargs: ("1234567890")},
)

@staticmethod

@@ -159,6 +159,7 @@ class TwitterGetTweetBlock(Block):
**kwargs,
) -> BlockOutput:
try:

tweet_data, included, meta, user_id, user_name = self.get_tweet(
credentials,
input_data.tweet_id,

@@ -44,8 +44,7 @@ class VideoNarrationBlock(Block):
)
script: str = SchemaField(description="Narration script text")
voice_id: str = SchemaField(
description="ElevenLabs voice ID",
default="21m00Tcm4TlvDq8ikWAM", # Rachel
description="ElevenLabs voice ID", default="21m00Tcm4TlvDq8ikWAM" # Rachel
)
model_id: Literal[
"eleven_multilingual_v2",

@@ -94,9 +94,7 @@ class TranscribeYoutubeVideoBlock(Block):
{"text": "Never gonna give you up"},
{"text": "Never gonna let you down"},
],
"format_transcript": lambda transcript: (
"Never gonna give you up\nNever gonna let you down"
),
"format_transcript": lambda transcript: "Never gonna give you up\nNever gonna let you down",
},
)


@@ -140,22 +140,20 @@ class ValidateEmailsBlock(Block):
)
],
test_mock={
"validate_email": lambda email, ip_address, credentials: (
ZBValidateResponse(
data={
"address": email,
"status": ZBValidateStatus.valid,
"sub_status": ZBValidateSubStatus.allowed,
"account": "test",
"domain": "test.com",
"did_you_mean": None,
"domain_age_days": None,
"free_email": False,
"mx_found": False,
"mx_record": None,
"smtp_provider": None,
}
)
"validate_email": lambda email, ip_address, credentials: ZBValidateResponse(
data={
"address": email,
"status": ZBValidateStatus.valid,
"sub_status": ZBValidateSubStatus.allowed,
"account": "test",
"domain": "test.com",
"did_you_mean": None,
"domain_age_days": None,
"free_email": False,
"mx_found": False,
"mx_record": None,
"smtp_provider": None,
}
)
},
)

@@ -172,7 +172,7 @@ async def add_test_data(db):
"storeListingId": listing.id,
"agentGraphId": graph.id,
"agentGraphVersion": graph.version,
"name": f"Test Agent {i + 1}",
"name": f"Test Agent {i+1}",
"subHeading": faker.catch_phrase(),
"description": faker.paragraph(nb_sentences=5),
"imageUrls": [faker.image_url()],
@@ -245,7 +245,9 @@ async def compare_counts(before, after):
print("🔍 Agent run changes:")
before_runs = before["agent_runs"].get("total_runs") or 0
after_runs = after["agent_runs"].get("total_runs") or 0
print(f" Total runs: {before_runs} → {after_runs} (+{after_runs - before_runs})")
print(
f" Total runs: {before_runs} → {after_runs} " f"(+{after_runs - before_runs})"
)

# Compare reviews
print("\n🔍 Review changes:")

@@ -147,7 +147,7 @@ def format_sql_insert(creds: dict) -> str:

sql = f"""
-- ============================================================
-- OAuth Application: {creds["name"]}
-- OAuth Application: {creds['name']}
-- Generated: {now_iso} UTC
-- ============================================================

@@ -167,14 +167,14 @@ INSERT INTO "OAuthApplication" (
"isActive"
)
VALUES (
'{creds["id"]}',
'{creds['id']}',
NOW(),
NOW(),
'{creds["name"]}',
{f"'{creds['description']}'" if creds["description"] else "NULL"},
'{creds["client_id"]}',
'{creds["client_secret_hash"]}',
'{creds["client_secret_salt"]}',
'{creds['name']}',
{f"'{creds['description']}'" if creds['description'] else 'NULL'},
'{creds['client_id']}',
'{creds['client_secret_hash']}',
'{creds['client_secret_salt']}',
ARRAY{redirect_uris_pg}::TEXT[],
ARRAY{grant_types_pg}::TEXT[],
ARRAY{scopes_pg}::"APIKeyPermission"[],
@@ -186,8 +186,8 @@ VALUES (
-- ⚠️ IMPORTANT: Save these credentials securely!
-- ============================================================
--
-- Client ID: {creds["client_id"]}
-- Client Secret: {creds["client_secret_plaintext"]}
-- Client ID: {creds['client_id']}
-- Client Secret: {creds['client_secret_plaintext']}
--
-- ⚠️ The client secret is shown ONLY ONCE!
-- ⚠️ Store it securely and share only with the application developer.
@@ -200,7 +200,7 @@ VALUES (
-- To verify the application was created:
-- SELECT "clientId", name, scopes, "redirectUris", "isActive"
-- FROM "OAuthApplication"
-- WHERE "clientId" = '{creds["client_id"]}';
-- WHERE "clientId" = '{creds['client_id']}';
"""
return sql


@@ -431,7 +431,7 @@ class UserCreditBase(ABC):
current_balance, _ = await self._get_credits(user_id)
if current_balance >= ceiling_balance:
raise ValueError(
f"You already have enough balance of ${current_balance / 100}, top-up is not required when you already have at least ${ceiling_balance / 100}"
f"You already have enough balance of ${current_balance/100}, top-up is not required when you already have at least ${ceiling_balance/100}"
)

# Single unified atomic operation for all transaction types using UserBalance
@@ -570,7 +570,7 @@ class UserCreditBase(ABC):
if amount < 0 and fail_insufficient_credits:
current_balance, _ = await self._get_credits(user_id)
raise InsufficientBalanceError(
message=f"Insufficient balance of ${current_balance / 100}, where this will cost ${abs(amount) / 100}",
message=f"Insufficient balance of ${current_balance/100}, where this will cost ${abs(amount)/100}",
user_id=user_id,
balance=current_balance,
amount=amount,
@@ -581,6 +581,7 @@ class UserCreditBase(ABC):


class UserCredit(UserCreditBase):

async def _send_refund_notification(
self,
notification_request: RefundRequestData,
@@ -732,7 +733,7 @@ class UserCredit(UserCreditBase):
)
if request.amount <= 0 or request.amount > transaction.amount:
raise AssertionError(
f"Invalid amount to deduct ${request.amount / 100} from ${transaction.amount / 100} top-up"
f"Invalid amount to deduct ${request.amount/100} from ${transaction.amount/100} top-up"
)

balance, _ = await self._add_transaction(
@@ -786,12 +787,12 @@ class UserCredit(UserCreditBase):

# If the user has enough balance, just let them win the dispute.
if balance - amount >= settings.config.refund_credit_tolerance_threshold:
logger.warning(f"Accepting dispute from {user_id} for ${amount / 100}")
logger.warning(f"Accepting dispute from {user_id} for ${amount/100}")
dispute.close()
return

logger.warning(
f"Adding extra info for dispute from {user_id} for ${amount / 100}"
f"Adding extra info for dispute from {user_id} for ${amount/100}"
)
# Retrieve recent transaction history to support our evidence.
# This provides a concise timeline that shows service usage and proper credit application.

@@ -107,15 +107,15 @@ async def test_ceiling_balance_clamps_when_would_exceed(server: SpinTestServer):
)

# Balance should be clamped to ceiling
assert final_balance == 1000, (
f"Balance should be clamped to 1000, got {final_balance}"
)
assert (
final_balance == 1000
), f"Balance should be clamped to 1000, got {final_balance}"

# Verify with get_credits too
stored_balance = await credit_system.get_credits(user_id)
assert stored_balance == 1000, (
f"Stored balance should be 1000, got {stored_balance}"
)
assert (
stored_balance == 1000
), f"Stored balance should be 1000, got {stored_balance}"

# Verify transaction shows the clamped amount
transactions = await CreditTransaction.prisma().find_many(
@@ -164,9 +164,9 @@ async def test_ceiling_balance_allows_when_under_threshold(server: SpinTestServe

# Verify with get_credits too
stored_balance = await credit_system.get_credits(user_id)
assert stored_balance == 500, (
f"Stored balance should be 500, got {stored_balance}"
)
assert (
stored_balance == 500
), f"Stored balance should be 500, got {stored_balance}"

finally:
await cleanup_test_user(user_id)

@@ -108,9 +108,9 @@ async def test_concurrent_spends_same_user(server: SpinTestServer):
transactions = await CreditTransaction.prisma().find_many(
where={"userId": user_id, "type": prisma.enums.CreditTransactionType.USAGE}
)
assert len(transactions) == 10, (
f"Expected 10 transactions, got {len(transactions)}"
)
assert (
len(transactions) == 10
), f"Expected 10 transactions, got {len(transactions)}"

finally:
await cleanup_test_user(user_id)
@@ -321,9 +321,9 @@ async def test_onboarding_reward_idempotency(server: SpinTestServer):
"transactionKey": f"REWARD-{user_id}-WELCOME",
}
)
assert len(transactions) == 1, (
f"Expected 1 reward transaction, got {len(transactions)}"
)
assert (
len(transactions) == 1
), f"Expected 1 reward transaction, got {len(transactions)}"

finally:
await cleanup_test_user(user_id)
@@ -358,9 +358,9 @@ async def test_integer_overflow_protection(server: SpinTestServer):

# Balance should be clamped to max_int, not overflowed
final_balance = await credit_system.get_credits(user_id)
assert final_balance == max_int, (
f"Balance should be clamped to {max_int}, got {final_balance}"
)
assert (
final_balance == max_int
), f"Balance should be clamped to {max_int}, got {final_balance}"

# Verify transaction was created with clamped amount
transactions = await CreditTransaction.prisma().find_many(
@@ -371,9 +371,9 @@ async def test_integer_overflow_protection(server: SpinTestServer):
order={"createdAt": "desc"},
)
assert len(transactions) > 0, "Transaction should be created"
assert transactions[0].runningBalance == max_int, (
"Transaction should show clamped balance"
)
assert (
transactions[0].runningBalance == max_int
), "Transaction should show clamped balance"

finally:
await cleanup_test_user(user_id)
@@ -432,9 +432,9 @@ async def test_high_concurrency_stress(server: SpinTestServer):

# Verify final balance
final_balance = await credit_system.get_credits(user_id)
assert final_balance == expected_balance, (
f"Expected {expected_balance}, got {final_balance}"
)
assert (
final_balance == expected_balance
), f"Expected {expected_balance}, got {final_balance}"
assert final_balance >= 0, "Balance went negative!"

finally:
@@ -507,7 +507,7 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer
sorted_timings = sorted(timings.items(), key=lambda x: x[1]["start"])
print("\nExecution order by start time:")
for i, (label, timing) in enumerate(sorted_timings):
print(f" {i + 1}. {label}: {timing['start']:.4f} -> {timing['end']:.4f}")
print(f" {i+1}. {label}: {timing['start']:.4f} -> {timing['end']:.4f}")

# Check for overlap (true concurrency) vs serialization
overlaps = []
@@ -533,9 +533,9 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer
print(f"Successful: {len(successful)}, Failed: {len(failed)}")

# All should succeed since 150 - (10 + 20 + 30) = 90 > 0
assert len(successful) == 3, (
f"Expected all 3 to succeed, got {len(successful)} successes: {results}"
)
assert (
len(successful) == 3
), f"Expected all 3 to succeed, got {len(successful)} successes: {results}"
assert final_balance == 90, f"Expected balance 90, got {final_balance}"

# Check transaction timestamps to confirm database-level serialization
@@ -546,7 +546,7 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer
print("\nDatabase transaction order (by createdAt):")
for i, tx in enumerate(transactions):
print(
f" {i + 1}. Amount {tx.amount}, Running balance: {tx.runningBalance}, Created: {tx.createdAt}"
f" {i+1}. Amount {tx.amount}, Running balance: {tx.runningBalance}, Created: {tx.createdAt}"
)

# Verify running balances are chronologically consistent (ordered by createdAt)
@@ -575,38 +575,38 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer

# Verify all balances are valid intermediate states
for balance in actual_balances:
assert balance in expected_possible_balances, (
f"Invalid balance {balance}, expected one of {expected_possible_balances}"
)
assert (
balance in expected_possible_balances
), f"Invalid balance {balance}, expected one of {expected_possible_balances}"

# Final balance should always be 90 (150 - 60)
assert min(actual_balances) == 90, (
f"Final balance should be 90, got {min(actual_balances)}"
)
assert (
min(actual_balances) == 90
), f"Final balance should be 90, got {min(actual_balances)}"

# The final transaction should always have balance 90
# The other transactions should have valid intermediate balances
assert 90 in actual_balances, (
f"Final balance 90 should be in actual_balances: {actual_balances}"
)
assert (
90 in actual_balances
), f"Final balance 90 should be in actual_balances: {actual_balances}"

# All balances should be >= 90 (the final state)
assert all(balance >= 90 for balance in actual_balances), (
f"All balances should be >= 90, got {actual_balances}"
)
assert all(
balance >= 90 for balance in actual_balances
), f"All balances should be >= 90, got {actual_balances}"

# CRITICAL: Transactions are atomic but can complete in any order
# What matters is that all running balances are valid intermediate states
# Each balance should be between 90 (final) and 140 (after first transaction)
for balance in actual_balances:
assert 90 <= balance <= 140, (
f"Balance {balance} is outside valid range [90, 140]"
)
assert (
90 <= balance <= 140
), f"Balance {balance} is outside valid range [90, 140]"

# Final balance (minimum) should always be 90
assert min(actual_balances) == 90, (
f"Final balance should be 90, got {min(actual_balances)}"
)
assert (
min(actual_balances) == 90
), f"Final balance should be 90, got {min(actual_balances)}"

finally:
await cleanup_test_user(user_id)
@@ -707,7 +707,7 @@ async def test_prove_database_locking_behavior(server: SpinTestServer):

for i, result in enumerate(sorted_results):
print(
f" {i + 1}. {result['label']}: DB operation took {result['db_duration']:.4f}s"
f" {i+1}. {result['label']}: DB operation took {result['db_duration']:.4f}s"
)

# Check if any operations overlapped at the database level
@@ -722,9 +722,9 @@ async def test_prove_database_locking_behavior(server: SpinTestServer):
print(f"\n💰 Final balance: {final_balance}")

if len(successful) == 3:
assert final_balance == 0, (
f"If all succeeded, balance should be 0, got {final_balance}"
)
assert (
final_balance == 0
), f"If all succeeded, balance should be 0, got {final_balance}"
print(
"✅ CONCLUSION: Database row locking causes requests to WAIT and execute serially"
)

@@ -109,9 +109,9 @@ async def test_deduct_credits_atomic(server: SpinTestServer):
where={"userId": REFUND_TEST_USER_ID}
)
assert user_balance is not None
assert user_balance.balance == 500, (
f"Expected balance 500, got {user_balance.balance}"
)
assert (
user_balance.balance == 500
), f"Expected balance 500, got {user_balance.balance}"

# Verify refund transaction was created
refund_tx = await CreditTransaction.prisma().find_first(
@@ -205,9 +205,9 @@ async def test_handle_dispute_with_sufficient_balance(
where={"userId": REFUND_TEST_USER_ID}
)
assert user_balance is not None
assert user_balance.balance == 1000, (
f"Balance should remain 1000, got {user_balance.balance}"
)
assert (
user_balance.balance == 1000
), f"Balance should remain 1000, got {user_balance.balance}"

finally:
await cleanup_test_user()
@@ -332,9 +332,9 @@ async def test_concurrent_refunds(server: SpinTestServer):
print(f"DEBUG: Final balance = {user_balance.balance}, expected = 500")

# With atomic implementation, all 5 refunds should process correctly
assert user_balance.balance == 500, (
f"Expected balance 500 after 5 refunds of 100 each, got {user_balance.balance}"
)
assert (
user_balance.balance == 500
), f"Expected balance 500 after 5 refunds of 100 each, got {user_balance.balance}"

# Verify all refund transactions exist
refund_txs = await CreditTransaction.prisma().find_many(
@@ -343,9 +343,9 @@ async def test_concurrent_refunds(server: SpinTestServer):
"type": CreditTransactionType.REFUND,
}
)
assert len(refund_txs) == 5, (
f"Expected 5 refund transactions, got {len(refund_txs)}"
)
assert (
len(refund_txs) == 5
), f"Expected 5 refund transactions, got {len(refund_txs)}"

running_balances: set[int] = {
tx.runningBalance for tx in refund_txs if tx.runningBalance is not None
@@ -353,20 +353,20 @@ async def test_concurrent_refunds(server: SpinTestServer):

# Verify all balances are valid intermediate states
for balance in running_balances:
assert 500 <= balance <= 1000, (
f"Invalid balance {balance}, should be between 500 and 1000"
)
assert (
500 <= balance <= 1000
), f"Invalid balance {balance}, should be between 500 and 1000"

# Final balance should be present
assert 500 in running_balances, (
f"Final balance 500 should be in {running_balances}"
)
assert (
500 in running_balances
), f"Final balance 500 should be in {running_balances}"

# All balances should be unique and form a valid sequence
sorted_balances = sorted(running_balances, reverse=True)
assert len(sorted_balances) == 5, (
f"Expected 5 unique balances, got {len(sorted_balances)}"
)
assert (
len(sorted_balances) == 5
), f"Expected 5 unique balances, got {len(sorted_balances)}"

finally:
await cleanup_test_user()

@@ -82,7 +82,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer):

# Test 2: Apply amount that should cause underflow
print("\n=== Test 2: Testing underflow protection ===")
test_amount = -200 # This should cause underflow: (POSTGRES_INT_MIN + 100) + (-200) = POSTGRES_INT_MIN - 100
test_amount = (
-200
) # This should cause underflow: (POSTGRES_INT_MIN + 100) + (-200) = POSTGRES_INT_MIN - 100
expected_without_protection = current_balance + test_amount
print(f"Current balance: {current_balance}")
print(f"Test amount: {test_amount}")
@@ -99,9 +101,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer):
print(f"Actual result: {balance_result}")

# Check if underflow protection worked
assert balance_result == POSTGRES_INT_MIN, (
f"Expected underflow protection to clamp balance to {POSTGRES_INT_MIN}, got {balance_result}"
)
assert (
balance_result == POSTGRES_INT_MIN
), f"Expected underflow protection to clamp balance to {POSTGRES_INT_MIN}, got {balance_result}"

# Test 3: Edge case - exactly at POSTGRES_INT_MIN
print("\n=== Test 3: Testing exact POSTGRES_INT_MIN boundary ===")
@@ -126,9 +128,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer):
)
print(f"After subtracting 1: {edge_result}")

assert edge_result == POSTGRES_INT_MIN, (
f"Expected balance to remain clamped at {POSTGRES_INT_MIN}, got {edge_result}"
)
assert (
edge_result == POSTGRES_INT_MIN
), f"Expected balance to remain clamped at {POSTGRES_INT_MIN}, got {edge_result}"

finally:
await cleanup_test_user(user_id)
@@ -174,18 +176,18 @@ async def test_underflow_protection_large_refunds(server: SpinTestServer):
)

# Balance should be clamped to POSTGRES_INT_MIN, not the calculated underflow value
assert final_balance == POSTGRES_INT_MIN, (
f"Balance should be clamped to {POSTGRES_INT_MIN}, got {final_balance}"
)
assert final_balance > expected_without_protection, (
f"Balance should be greater than underflow result {expected_without_protection}, got {final_balance}"
)
assert (
final_balance == POSTGRES_INT_MIN
), f"Balance should be clamped to {POSTGRES_INT_MIN}, got {final_balance}"
assert (
final_balance > expected_without_protection
), f"Balance should be greater than underflow result {expected_without_protection}, got {final_balance}"

# Verify with get_credits too
stored_balance = await credit_system.get_credits(user_id)
assert stored_balance == POSTGRES_INT_MIN, (
f"Stored balance should be {POSTGRES_INT_MIN}, got {stored_balance}"
)
assert (
stored_balance == POSTGRES_INT_MIN
), f"Stored balance should be {POSTGRES_INT_MIN}, got {stored_balance}"

# Verify transaction was created with the underflow-protected balance
transactions = await CreditTransaction.prisma().find_many(
@@ -193,9 +195,9 @@ async def test_underflow_protection_large_refunds(server: SpinTestServer):
order={"createdAt": "desc"},
)
assert len(transactions) > 0, "Refund transaction should be created"
assert transactions[0].runningBalance == POSTGRES_INT_MIN, (
f"Transaction should show clamped balance {POSTGRES_INT_MIN}, got {transactions[0].runningBalance}"
)
assert (
transactions[0].runningBalance == POSTGRES_INT_MIN
), f"Transaction should show clamped balance {POSTGRES_INT_MIN}, got {transactions[0].runningBalance}"

finally:
await cleanup_test_user(user_id)
@@ -236,12 +238,12 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe
expected_balance_1 = (
initial_balance + refund_amount
) # Should be POSTGRES_INT_MIN + 200
assert balance_1 == expected_balance_1, (
f"First refund should result in {expected_balance_1}, got {balance_1}"
)
assert balance_1 >= POSTGRES_INT_MIN, (
f"First refund should not go below {POSTGRES_INT_MIN}, got {balance_1}"
)
assert (
balance_1 == expected_balance_1
), f"First refund should result in {expected_balance_1}, got {balance_1}"
assert (
balance_1 >= POSTGRES_INT_MIN
), f"First refund should not go below {POSTGRES_INT_MIN}, got {balance_1}"

# Second refund: (POSTGRES_INT_MIN + 200) + (-300) = POSTGRES_INT_MIN - 100 (would underflow)
balance_2, _ = await credit_system._add_transaction(
@@ -252,9 +254,9 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe
)

# Should be clamped to minimum due to underflow protection
assert balance_2 == POSTGRES_INT_MIN, (
f"Second refund should be clamped to {POSTGRES_INT_MIN}, got {balance_2}"
)
assert (
balance_2 == POSTGRES_INT_MIN
), f"Second refund should be clamped to {POSTGRES_INT_MIN}, got {balance_2}"

# Third refund: Should stay at minimum
balance_3, _ = await credit_system._add_transaction(
@@ -265,15 +267,15 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe
)

# Should still be at minimum
assert balance_3 == POSTGRES_INT_MIN, (
f"Third refund should stay at {POSTGRES_INT_MIN}, got {balance_3}"
)
assert (
balance_3 == POSTGRES_INT_MIN
), f"Third refund should stay at {POSTGRES_INT_MIN}, got {balance_3}"

# Final balance check
final_balance = await credit_system.get_credits(user_id)
assert final_balance == POSTGRES_INT_MIN, (
f"Final balance should be {POSTGRES_INT_MIN}, got {final_balance}"
)
assert (
final_balance == POSTGRES_INT_MIN
), f"Final balance should be {POSTGRES_INT_MIN}, got {final_balance}"

finally:
await cleanup_test_user(user_id)
@@ -325,35 +327,35 @@ async def test_concurrent_large_refunds_no_underflow(server: SpinTestServer):
for i, result in enumerate(results):
if isinstance(result, tuple):
balance, _ = result
assert balance >= POSTGRES_INT_MIN, (
f"Result {i} balance {balance} underflowed below {POSTGRES_INT_MIN}"
)
assert (
balance >= POSTGRES_INT_MIN
), f"Result {i} balance {balance} underflowed below {POSTGRES_INT_MIN}"
valid_results.append(balance)
elif isinstance(result, str) and "FAILED" in result:
# Some operations might fail due to validation, that's okay
pass
else:
# Unexpected exception
assert not isinstance(result, Exception), (
f"Unexpected exception in result {i}: {result}"
)
assert not isinstance(
result, Exception
), f"Unexpected exception in result {i}: {result}"

# At least one operation should succeed
assert len(valid_results) > 0, (
f"At least one refund should succeed, got results: {results}"
)
assert (
len(valid_results) > 0
), f"At least one refund should succeed, got results: {results}"

# All successful results should be >= POSTGRES_INT_MIN
for balance in valid_results:
assert balance >= POSTGRES_INT_MIN, (
f"Balance {balance} should not be below {POSTGRES_INT_MIN}"
)
assert (
balance >= POSTGRES_INT_MIN
), f"Balance {balance} should not be below {POSTGRES_INT_MIN}"

# Final balance should be valid and at or above POSTGRES_INT_MIN
final_balance = await credit_system.get_credits(user_id)
assert final_balance >= POSTGRES_INT_MIN, (
f"Final balance {final_balance} should not underflow below {POSTGRES_INT_MIN}"
)
assert (
final_balance >= POSTGRES_INT_MIN
), f"Final balance {final_balance} should not underflow below {POSTGRES_INT_MIN}"

finally:
await cleanup_test_user(user_id)

@@ -60,9 +60,9 @@ async def test_user_balance_migration_complete(server: SpinTestServer):
# User.balance should not exist or should be None/0 if it exists
user_balance_attr = getattr(user, "balance", None)
if user_balance_attr is not None:
assert user_balance_attr == 0 or user_balance_attr is None, (
f"User.balance should be 0 or None, got {user_balance_attr}"
)
assert (
user_balance_attr == 0 or user_balance_attr is None
), f"User.balance should be 0 or None, got {user_balance_attr}"

# 2. Perform various credit operations using internal method (bypasses Stripe)
await credit_system._add_transaction(
@@ -87,9 +87,9 @@ async def test_user_balance_migration_complete(server: SpinTestServer):
# 3. Verify UserBalance table has correct values
user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id})
assert user_balance is not None
assert user_balance.balance == 700, (
f"UserBalance should be 700, got {user_balance.balance}"
)
assert (
user_balance.balance == 700
), f"UserBalance should be 700, got {user_balance.balance}"

# 4. CRITICAL: Verify User.balance is NEVER updated during operations
user_after = await User.prisma().find_unique(where={"id": user_id})
@@ -97,15 +97,15 @@ async def test_user_balance_migration_complete(server: SpinTestServer):
user_balance_after = getattr(user_after, "balance", None)
if user_balance_after is not None:
# If User.balance exists, it should still be 0 (never updated)
assert user_balance_after == 0 or user_balance_after is None, (
f"User.balance should remain 0/None after operations, got {user_balance_after}. This indicates User.balance is still being used!"
)
assert (
user_balance_after == 0 or user_balance_after is None
), f"User.balance should remain 0/None after operations, got {user_balance_after}. This indicates User.balance is still being used!"

# 5. Verify get_credits always returns UserBalance value, not User.balance
final_balance = await credit_system.get_credits(user_id)
assert final_balance == user_balance.balance, (
f"get_credits should return UserBalance value {user_balance.balance}, got {final_balance}"
)
assert (
final_balance == user_balance.balance
), f"get_credits should return UserBalance value {user_balance.balance}, got {final_balance}"

finally:
await cleanup_test_user(user_id)
@@ -126,9 +126,9 @@ async def test_detect_stale_user_balance_queries(server: SpinTestServer):

# Verify that get_credits returns UserBalance value (5000), not any stale User.balance value
balance = await credit_system.get_credits(user_id)
assert balance == 5000, (
f"Expected get_credits to return 5000 from UserBalance, got {balance}"
)
assert (
balance == 5000
), f"Expected get_credits to return 5000 from UserBalance, got {balance}"

# Verify all operations use UserBalance using internal method (bypasses Stripe)
await credit_system._add_transaction(
@@ -143,9 +143,9 @@ async def test_detect_stale_user_balance_queries(server: SpinTestServer):
# Verify UserBalance table has the correct value
user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id})
assert user_balance is not None
assert user_balance.balance == 6000, (
f"UserBalance should be 6000, got {user_balance.balance}"
)
assert (
user_balance.balance == 6000
), f"UserBalance should be 6000, got {user_balance.balance}"

finally:
await cleanup_test_user(user_id)
@@ -196,9 +196,9 @@ async def test_concurrent_operations_use_userbalance_only(server: SpinTestServer
# Verify UserBalance has correct value
user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id})
assert user_balance is not None
assert user_balance.balance == 400, (
f"UserBalance should be 400, got {user_balance.balance}"
)
assert (
user_balance.balance == 400
), f"UserBalance should be 400, got {user_balance.balance}"

# Critical: If User.balance exists and was used, it might have wrong value
try:

@@ -569,6 +569,7 @@ class GraphModel(Graph, GraphMeta):
field_name,
field_info,
) in node.block.input_schema.get_credentials_fields_info().items():

discriminator = field_info.discriminator
if not discriminator:
node_credential_data.append((field_info, (node.id, field_name)))
@@ -835,9 +836,9 @@ class GraphModel(Graph, GraphMeta):
# Check for missing dependencies when dependent field is present
missing_deps = [dep for dep in dependencies if not has_value(node, dep)]
if missing_deps and (field_has_value or field_is_required):
node_errors[node.id][field_name] = (
f"Requires {', '.join(missing_deps)} to be set"
)
node_errors[node.id][
field_name
] = f"Requires {', '.join(missing_deps)} to be set"

return node_errors


@@ -468,6 +468,7 @@ class UserMetadataRaw(TypedDict, total=False):


class UserIntegrations(BaseModel):

class ManagedCredentials(BaseModel):
"""Integration credentials managed by us, rather than by the user"""


@@ -100,7 +100,8 @@ async def create_workspace_file(
)

logger.info(
f"Created workspace file {file.id} at path {path} in workspace {workspace_id}"
f"Created workspace file {file.id} at path {path} "
f"in workspace {workspace_id}"
)
return file


@@ -224,9 +224,7 @@ class TestBuildExecutionSummary:
# Check that errors are now in node's recent_errors field
# Find the output node (with truncated UUID)
output_node = next(
n
for n in summary["nodes"]
if n["node_id"] == "678e9012" # Truncated
n for n in summary["nodes"] if n["node_id"] == "678e9012" # Truncated
)
assert output_node["error_count"] == 1
assert output_node["execution_count"] == 1
@@ -354,9 +352,7 @@ class TestBuildExecutionSummary:

# String error format - find node with truncated ID
string_error_node = next(
n
for n in summary["nodes"]
if n["node_id"] == "333e4444" # Truncated
n for n in summary["nodes"] if n["node_id"] == "333e4444" # Truncated
)
assert len(string_error_node["recent_errors"]) == 1
assert (
@@ -366,9 +362,7 @@ class TestBuildExecutionSummary:

# No error output format - find node with truncated ID
no_error_node = next(
n
for n in summary["nodes"]
if n["node_id"] == "777e8888" # Truncated
n for n in summary["nodes"] if n["node_id"] == "777e8888" # Truncated
)
assert len(no_error_node["recent_errors"]) == 1
assert no_error_node["recent_errors"][0]["error"] == "Unknown error"
@@ -385,9 +379,8 @@ class TestLLMCall:
|
||||
from backend.blocks.llm import AIStructuredResponseGeneratorBlock
|
||||
from backend.data.model import APIKeyCredentials
|
||||
|
||||
with (
|
||||
patch("backend.blocks.llm.llm_call") as mock_llm_call,
|
||||
patch("backend.blocks.llm.secrets.token_hex", return_value="test123"),
|
||||
with patch("backend.blocks.llm.llm_call") as mock_llm_call, patch(
|
||||
"backend.blocks.llm.secrets.token_hex", return_value="test123"
|
||||
):
|
||||
mock_llm_call.return_value = LLMResponse(
|
||||
raw_response={},
|
||||
@@ -449,9 +442,8 @@ class TestLLMCall:
|
||||
from backend.blocks.llm import AIStructuredResponseGeneratorBlock
|
||||
from backend.data.model import APIKeyCredentials
|
||||
|
||||
with (
|
||||
patch("backend.blocks.llm.llm_call") as mock_llm_call,
|
||||
patch("backend.blocks.llm.secrets.token_hex", return_value="test123"),
|
||||
with patch("backend.blocks.llm.llm_call") as mock_llm_call, patch(
|
||||
"backend.blocks.llm.secrets.token_hex", return_value="test123"
|
||||
):
|
||||
# Return invalid JSON that will fail validation (missing required field)
|
||||
mock_llm_call.return_value = LLMResponse(
|
||||
@@ -523,21 +515,17 @@ class TestGenerateActivityStatusForExecution:
        mock_graph.links = []
        mock_db_client.get_graph.return_value = mock_graph

        with (
            patch(
                "backend.executor.activity_status_generator.get_block"
            ) as mock_get_block,
            patch(
                "backend.executor.activity_status_generator.Settings"
            ) as mock_settings,
            patch(
                "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock"
            ) as mock_structured_block,
            patch(
                "backend.executor.activity_status_generator.is_feature_enabled",
                return_value=True,
            ),
        with patch(
            "backend.executor.activity_status_generator.get_block"
        ) as mock_get_block, patch(
            "backend.executor.activity_status_generator.Settings"
        ) as mock_settings, patch(
            "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock"
        ) as mock_structured_block, patch(
            "backend.executor.activity_status_generator.is_feature_enabled",
            return_value=True,
        ):

            mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id)
            mock_settings.return_value.secrets.openai_internal_api_key = "test_key"

@@ -545,13 +533,10 @@ class TestGenerateActivityStatusForExecution:
        mock_instance = mock_structured_block.return_value

        async def mock_run(*args, **kwargs):
            yield (
                "response",
                {
                    "activity_status": "I analyzed your data and provided the requested insights.",
                    "correctness_score": 0.85,
                },
            )
            yield "response", {
                "activity_status": "I analyzed your data and provided the requested insights.",
                "correctness_score": 0.85,
            }

        mock_instance.run = mock_run

@@ -601,14 +586,11 @@ class TestGenerateActivityStatusForExecution:
        """Test activity status generation with no API key."""
        mock_db_client = AsyncMock()

        with (
            patch(
                "backend.executor.activity_status_generator.Settings"
            ) as mock_settings,
            patch(
                "backend.executor.activity_status_generator.is_feature_enabled",
                return_value=True,
            ),
        with patch(
            "backend.executor.activity_status_generator.Settings"
        ) as mock_settings, patch(
            "backend.executor.activity_status_generator.is_feature_enabled",
            return_value=True,
        ):
            mock_settings.return_value.secrets.openai_internal_api_key = ""

@@ -630,14 +612,11 @@ class TestGenerateActivityStatusForExecution:
        mock_db_client = AsyncMock()
        mock_db_client.get_node_executions.side_effect = Exception("Database error")

        with (
            patch(
                "backend.executor.activity_status_generator.Settings"
            ) as mock_settings,
            patch(
                "backend.executor.activity_status_generator.is_feature_enabled",
                return_value=True,
            ),
        with patch(
            "backend.executor.activity_status_generator.Settings"
        ) as mock_settings, patch(
            "backend.executor.activity_status_generator.is_feature_enabled",
            return_value=True,
        ):
            mock_settings.return_value.secrets.openai_internal_api_key = "test_key"

@@ -662,21 +641,17 @@ class TestGenerateActivityStatusForExecution:
        mock_db_client.get_graph_metadata.return_value = None  # No metadata
        mock_db_client.get_graph.return_value = None  # No graph

        with (
            patch(
                "backend.executor.activity_status_generator.get_block"
            ) as mock_get_block,
            patch(
                "backend.executor.activity_status_generator.Settings"
            ) as mock_settings,
            patch(
                "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock"
            ) as mock_structured_block,
            patch(
                "backend.executor.activity_status_generator.is_feature_enabled",
                return_value=True,
            ),
        with patch(
            "backend.executor.activity_status_generator.get_block"
        ) as mock_get_block, patch(
            "backend.executor.activity_status_generator.Settings"
        ) as mock_settings, patch(
            "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock"
        ) as mock_structured_block, patch(
            "backend.executor.activity_status_generator.is_feature_enabled",
            return_value=True,
        ):

            mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id)
            mock_settings.return_value.secrets.openai_internal_api_key = "test_key"

@@ -684,13 +659,10 @@ class TestGenerateActivityStatusForExecution:
        mock_instance = mock_structured_block.return_value

        async def mock_run(*args, **kwargs):
            yield (
                "response",
                {
                    "activity_status": "Agent completed execution.",
                    "correctness_score": 0.8,
                },
            )
            yield "response", {
                "activity_status": "Agent completed execution.",
                "correctness_score": 0.8,
            }

        mock_instance.run = mock_run

@@ -732,21 +704,17 @@ class TestIntegration:

        expected_activity = "I processed user input but failed during final output generation due to system error."

        with (
            patch(
                "backend.executor.activity_status_generator.get_block"
            ) as mock_get_block,
            patch(
                "backend.executor.activity_status_generator.Settings"
            ) as mock_settings,
            patch(
                "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock"
            ) as mock_structured_block,
            patch(
                "backend.executor.activity_status_generator.is_feature_enabled",
                return_value=True,
            ),
        with patch(
            "backend.executor.activity_status_generator.get_block"
        ) as mock_get_block, patch(
            "backend.executor.activity_status_generator.Settings"
        ) as mock_settings, patch(
            "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock"
        ) as mock_structured_block, patch(
            "backend.executor.activity_status_generator.is_feature_enabled",
            return_value=True,
        ):

            mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id)
            mock_settings.return_value.secrets.openai_internal_api_key = "test_key"

@@ -754,13 +722,10 @@ class TestIntegration:
        mock_instance = mock_structured_block.return_value

        async def mock_run(*args, **kwargs):
            yield (
                "response",
                {
                    "activity_status": expected_activity,
                    "correctness_score": 0.3,  # Low score since there was a failure
                },
            )
            yield "response", {
                "activity_status": expected_activity,
                "correctness_score": 0.3,  # Low score since there was a failure
            }

        mock_instance.run = mock_run

@@ -20,6 +20,7 @@ logger = logging.getLogger(__name__)


class AutoModManager:

    def __init__(self):
        self.config = self._load_config()

@@ -35,14 +35,16 @@ async def test_handle_insufficient_funds_sends_discord_alert_first_time(
        amount=-714,  # Attempting to spend $7.14
    )

    with (
        patch("backend.executor.manager.queue_notification") as mock_queue_notif,
        patch(
            "backend.executor.manager.get_notification_manager_client"
        ) as mock_get_client,
        patch("backend.executor.manager.settings") as mock_settings,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
@@ -107,14 +109,16 @@ async def test_handle_insufficient_funds_skips_duplicate_notifications(
        amount=-714,
    )

    with (
        patch("backend.executor.manager.queue_notification") as mock_queue_notif,
        patch(
            "backend.executor.manager.get_notification_manager_client"
        ) as mock_get_client,
        patch("backend.executor.manager.settings") as mock_settings,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
@@ -162,14 +166,14 @@ async def test_handle_insufficient_funds_different_agents_get_separate_alerts(
        amount=-714,
    )

    with (
        patch("backend.executor.manager.queue_notification"),
        patch(
            "backend.executor.manager.get_notification_manager_client"
        ) as mock_get_client,
        patch("backend.executor.manager.settings") as mock_settings,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch("backend.executor.manager.queue_notification"), patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
        mock_settings.config.frontend_base_url = "https://test.com"
@@ -224,6 +228,7 @@ async def test_clear_insufficient_funds_notifications(server: SpinTestServer):
    user_id = "test-user-123"

    with patch("backend.executor.manager.redis") as mock_redis_module:

        mock_redis_client = MagicMock()
        # get_redis_async is an async function, so we need AsyncMock for it
        mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
@@ -259,6 +264,7 @@ async def test_clear_insufficient_funds_notifications_no_keys(server: SpinTestSe
    user_id = "test-user-no-notifications"

    with patch("backend.executor.manager.redis") as mock_redis_module:

        mock_redis_client = MagicMock()
        # get_redis_async is an async function, so we need AsyncMock for it
        mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
@@ -285,6 +291,7 @@ async def test_clear_insufficient_funds_notifications_handles_redis_error(
    user_id = "test-user-redis-error"

    with patch("backend.executor.manager.redis") as mock_redis_module:

        # Mock get_redis_async to raise an error
        mock_redis_module.get_redis_async = AsyncMock(
            side_effect=Exception("Redis connection failed")
@@ -313,14 +320,16 @@ async def test_handle_insufficient_funds_continues_on_redis_error(
        amount=-714,
    )

    with (
        patch("backend.executor.manager.queue_notification") as mock_queue_notif,
        patch(
            "backend.executor.manager.get_notification_manager_client"
        ) as mock_get_client,
        patch("backend.executor.manager.settings") as mock_settings,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
        mock_settings.config.frontend_base_url = "https://test.com"
@@ -360,10 +369,10 @@ async def test_add_transaction_clears_notifications_on_grant(server: SpinTestSer

    user_id = "test-user-grant-clear"

    with (
        patch("backend.data.credit.query_raw_with_schema") as mock_query,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        # Mock the query to return a successful transaction
        mock_query.return_value = [{"balance": 1000, "transactionKey": "test-tx-key"}]

@@ -402,10 +411,10 @@ async def test_add_transaction_clears_notifications_on_top_up(server: SpinTestSe

    user_id = "test-user-topup-clear"

    with (
        patch("backend.data.credit.query_raw_with_schema") as mock_query,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        # Mock the query to return a successful transaction
        mock_query.return_value = [{"balance": 2000, "transactionKey": "test-tx-key-2"}]

@@ -440,10 +449,10 @@ async def test_add_transaction_skips_clearing_for_inactive_transaction(

    user_id = "test-user-inactive"

    with (
        patch("backend.data.credit.query_raw_with_schema") as mock_query,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        # Mock the query to return a successful transaction
        mock_query.return_value = [{"balance": 500, "transactionKey": "test-tx-key-3"}]

@@ -476,10 +485,10 @@ async def test_add_transaction_skips_clearing_for_usage_transaction(

    user_id = "test-user-usage"

    with (
        patch("backend.data.credit.query_raw_with_schema") as mock_query,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
        "backend.executor.manager.redis"
    ) as mock_redis_module:

        # Mock the query to return a successful transaction
        mock_query.return_value = [{"balance": 400, "transactionKey": "test-tx-key-4"}]

@@ -510,11 +519,10 @@ async def test_enable_transaction_clears_notifications(server: SpinTestServer):

    user_id = "test-user-enable"

    with (
        patch("backend.data.credit.CreditTransaction") as mock_credit_tx,
        patch("backend.data.credit.query_raw_with_schema") as mock_query,
        patch("backend.executor.manager.redis") as mock_redis_module,
    ):
    with patch("backend.data.credit.CreditTransaction") as mock_credit_tx, patch(
        "backend.data.credit.query_raw_with_schema"
    ) as mock_query, patch("backend.executor.manager.redis") as mock_redis_module:

        # Mock finding the pending transaction
        mock_transaction = MagicMock()
        mock_transaction.amount = 1000

@@ -18,13 +18,14 @@ async def test_handle_low_balance_threshold_crossing(server: SpinTestServer):
    transaction_cost = 600  # $6 transaction

    # Mock dependencies
    with (
        patch("backend.executor.manager.queue_notification") as mock_queue_notif,
        patch(
            "backend.executor.manager.get_notification_manager_client"
        ) as mock_get_client,
        patch("backend.executor.manager.settings") as mock_settings,
    ):
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
@@ -76,13 +77,14 @@ async def test_handle_low_balance_no_notification_when_not_crossing(
    )

    # Mock dependencies
    with (
        patch("backend.executor.manager.queue_notification") as mock_queue_notif,
        patch(
            "backend.executor.manager.get_notification_manager_client"
        ) as mock_get_client,
        patch("backend.executor.manager.settings") as mock_settings,
    ):
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client
@@ -118,13 +120,14 @@ async def test_handle_low_balance_no_duplicate_when_already_below(
    )

    # Mock dependencies
    with (
        patch("backend.executor.manager.queue_notification") as mock_queue_notif,
        patch(
            "backend.executor.manager.get_notification_manager_client"
        ) as mock_get_client,
        patch("backend.executor.manager.settings") as mock_settings,
    ):
    with patch(
        "backend.executor.manager.queue_notification"
    ) as mock_queue_notif, patch(
        "backend.executor.manager.get_notification_manager_client"
    ) as mock_get_client, patch(
        "backend.executor.manager.settings"
    ) as mock_settings:

        # Setup mocks
        mock_client = MagicMock()
        mock_get_client.return_value = mock_client

@@ -92,12 +92,12 @@ async def assert_sample_graph_executions(
    logger.info(f"Checking first StoreValueBlock execution: {exec}")
    assert exec.status == execution.ExecutionStatus.COMPLETED
    assert exec.graph_exec_id == graph_exec_id
    assert exec.output_data in output_list, (
        f"Output data: {exec.output_data} and {output_list}"
    )
    assert exec.input_data in input_list, (
        f"Input data: {exec.input_data} and {input_list}"
    )
    assert (
        exec.output_data in output_list
    ), f"Output data: {exec.output_data} and {output_list}"
    assert (
        exec.input_data in input_list
    ), f"Input data: {exec.input_data} and {input_list}"
    assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id]

    # Executing StoreValueBlock
@@ -105,12 +105,12 @@ async def assert_sample_graph_executions(
    logger.info(f"Checking second StoreValueBlock execution: {exec}")
    assert exec.status == execution.ExecutionStatus.COMPLETED
    assert exec.graph_exec_id == graph_exec_id
    assert exec.output_data in output_list, (
        f"Output data: {exec.output_data} and {output_list}"
    )
    assert exec.input_data in input_list, (
        f"Input data: {exec.input_data} and {input_list}"
    )
    assert (
        exec.output_data in output_list
    ), f"Output data: {exec.output_data} and {output_list}"
    assert (
        exec.input_data in input_list
    ), f"Input data: {exec.input_data} and {input_list}"
    assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id]

    # Executing FillTextTemplateBlock
@@ -301,7 +301,7 @@ async def test_static_input_link_on_graph(server: SpinTestServer):
    assert len(graph_exec.node_executions) == 8
    # The last 3 executions will be a+b=4+5=9
    for i, exec_data in enumerate(graph_exec.node_executions[-3:]):
        logger.info(f"Checking execution {i + 1} of last 3: {exec_data}")
        logger.info(f"Checking execution {i+1} of last 3: {exec_data}")
        assert exec_data.status == execution.ExecutionStatus.COMPLETED
        assert exec_data.output_data == {"result": [9]}
    logger.info("Completed test_static_input_link_on_graph")

@@ -292,9 +292,9 @@ async def _validate_node_input_credentials(
                if node.credentials_optional:
                    continue  # Don't add error, will be marked for skip after loop
                else:
                    credential_errors[node.id][field_name] = (
                        "These credentials are required"
                    )
                    credential_errors[node.id][
                        field_name
                    ] = "These credentials are required"
                continue

            credentials_meta = credentials_meta_type.model_validate(field_value)
@@ -313,15 +313,15 @@ async def _validate_node_input_credentials(
            except Exception as e:
                # Handle any errors fetching credentials
                # If credentials were explicitly configured but unavailable, it's an error
                credential_errors[node.id][field_name] = (
                    f"Credentials not available: {e}"
                )
                credential_errors[node.id][
                    field_name
                ] = f"Credentials not available: {e}"
                continue

            if not credentials:
                credential_errors[node.id][field_name] = (
                    f"Unknown credentials #{credentials_meta.id}"
                )
                credential_errors[node.id][
                    field_name
                ] = f"Unknown credentials #{credentials_meta.id}"
                continue

            if (
@@ -334,9 +334,9 @@ async def _validate_node_input_credentials(
                    f"{credentials_meta.type}<>{credentials.type};"
                    f"{credentials_meta.provider}<>{credentials.provider}"
                )
                credential_errors[node.id][field_name] = (
                    "Invalid credentials: type/provider mismatch"
                )
                credential_errors[node.id][
                    field_name
                ] = "Invalid credentials: type/provider mismatch"
                continue

            # If node has optional credentials and any are missing, mark for skipping
@@ -410,10 +410,9 @@ async def validate_graph_with_credentials(
    )

    # Get credential input/availability/validation errors and nodes to skip
    (
        node_credential_input_errors,
        nodes_to_skip,
    ) = await _validate_node_input_credentials(graph, user_id, nodes_input_masks)
    node_credential_input_errors, nodes_to_skip = (
        await _validate_node_input_credentials(graph, user_id, nodes_input_masks)
    )

    # Merge credential errors with structural errors
    for node_id, field_errors in node_credential_input_errors.items():
@@ -561,14 +560,13 @@ async def validate_and_construct_node_execution_input(
        nodes_input_masks or {},
    )

    (
        starting_nodes_input,
        nodes_to_skip,
    ) = await _construct_starting_node_execution_input(
        graph=graph,
        user_id=user_id,
        graph_inputs=graph_inputs,
        nodes_input_masks=nodes_input_masks,
    starting_nodes_input, nodes_to_skip = (
        await _construct_starting_node_execution_input(
            graph=graph,
            user_id=user_id,
            graph_inputs=graph_inputs,
            nodes_input_masks=nodes_input_masks,
        )
    )

    return graph, starting_nodes_input, nodes_input_masks, nodes_to_skip
@@ -632,7 +630,8 @@ def create_execution_queue_config() -> RabbitMQConfig:
            # Solution: Disable consumer timeout entirely - let graphs run indefinitely
            # Safety: Heartbeat mechanism now handles dead consumer detection instead
            # Use case: Graph executions that take hours to complete (AI model training, etc.)
            "x-consumer-timeout": GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS * 1000,
            "x-consumer-timeout": GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS
            * 1000,
        },
    )
    cancel_queue = Queue(
@@ -858,19 +857,16 @@ async def add_graph_execution(
    )

    # Create new execution
    (
        graph,
        starting_nodes_input,
        compiled_nodes_input_masks,
        nodes_to_skip,
    ) = await validate_and_construct_node_execution_input(
        graph_id=graph_id,
        user_id=user_id,
        graph_inputs=inputs or {},
        graph_version=graph_version,
        graph_credentials_inputs=graph_credentials_inputs,
        nodes_input_masks=nodes_input_masks,
        is_sub_graph=parent_exec_id is not None,
    graph, starting_nodes_input, compiled_nodes_input_masks, nodes_to_skip = (
        await validate_and_construct_node_execution_input(
            graph_id=graph_id,
            user_id=user_id,
            graph_inputs=inputs or {},
            graph_version=graph_version,
            graph_credentials_inputs=graph_credentials_inputs,
            nodes_input_masks=nodes_input_masks,
            is_sub_graph=parent_exec_id is not None,
        )
    )

    graph_exec = await edb.create_graph_execution(

@@ -486,6 +486,7 @@ class IntegrationCredentialsStore:
        user_integrations.oauth_states.append(state)

        async with await self.locked_user_integrations(user_id):

            user_integrations = await self._get_user_integrations(user_id)
            oauth_states = user_integrations.oauth_states
            oauth_states.append(state)

@@ -140,7 +140,8 @@ class IntegrationCredentialsManager:
        oauth_handler = await _get_provider_oauth_handler(credentials.provider)
        if oauth_handler.needs_refresh(credentials):
            logger.debug(
                f"Refreshing '{credentials.provider}' credentials #{credentials.id}"
                f"Refreshing '{credentials.provider}' "
                f"credentials #{credentials.id}"
            )
            _lock = None
            if lock:

@@ -77,23 +77,18 @@ class TestNotificationErrorHandling:
        self, notification_manager, sample_batch_event
    ):
        """Test that 406 inactive recipient error stops ALL processing for that user."""
        with (
            patch("backend.notifications.notifications.logger"),
            patch(
                "backend.notifications.notifications.set_user_email_verification",
                new_callable=AsyncMock,
            ) as mock_set_verification,
            patch(
                "backend.notifications.notifications.disable_all_user_notifications",
                new_callable=AsyncMock,
            ) as mock_disable_all,
            patch(
                "backend.notifications.notifications.get_database_manager_async_client"
            ) as mock_db_client,
            patch(
                "backend.notifications.notifications.generate_unsubscribe_link"
            ) as mock_unsub_link,
        ):
        with patch("backend.notifications.notifications.logger"), patch(
            "backend.notifications.notifications.set_user_email_verification",
            new_callable=AsyncMock,
        ) as mock_set_verification, patch(
            "backend.notifications.notifications.disable_all_user_notifications",
            new_callable=AsyncMock,
        ) as mock_disable_all, patch(
            "backend.notifications.notifications.get_database_manager_async_client"
        ) as mock_db_client, patch(
            "backend.notifications.notifications.generate_unsubscribe_link"
        ) as mock_unsub_link:

            # Create batch of 5 notifications
            notifications = []
            for i in range(5):
@@ -174,15 +169,12 @@ class TestNotificationErrorHandling:
        self, notification_manager, sample_batch_event
    ):
        """Test that 422 error permanently removes the malformed notification from batch and continues with others."""
        with (
            patch("backend.notifications.notifications.logger") as mock_logger,
            patch(
                "backend.notifications.notifications.get_database_manager_async_client"
            ) as mock_db_client,
            patch(
                "backend.notifications.notifications.generate_unsubscribe_link"
            ) as mock_unsub_link,
        ):
        with patch("backend.notifications.notifications.logger") as mock_logger, patch(
            "backend.notifications.notifications.get_database_manager_async_client"
        ) as mock_db_client, patch(
            "backend.notifications.notifications.generate_unsubscribe_link"
        ) as mock_unsub_link:

            # Create batch of 5 notifications
            notifications = []
            for i in range(5):
@@ -280,15 +272,12 @@ class TestNotificationErrorHandling:
        self, notification_manager, sample_batch_event
    ):
        """Test that oversized notifications are permanently removed from batch but others continue."""
        with (
            patch("backend.notifications.notifications.logger") as mock_logger,
            patch(
                "backend.notifications.notifications.get_database_manager_async_client"
            ) as mock_db_client,
            patch(
                "backend.notifications.notifications.generate_unsubscribe_link"
            ) as mock_unsub_link,
        ):
        with patch("backend.notifications.notifications.logger") as mock_logger, patch(
            "backend.notifications.notifications.get_database_manager_async_client"
        ) as mock_db_client, patch(
            "backend.notifications.notifications.generate_unsubscribe_link"
        ) as mock_unsub_link:

            # Create batch of 5 notifications
            notifications = []
            for i in range(5):
@@ -393,15 +382,12 @@ class TestNotificationErrorHandling:
        self, notification_manager, sample_batch_event
    ):
        """Test that generic API errors keep notifications in batch for retry while others continue."""
        with (
            patch("backend.notifications.notifications.logger") as mock_logger,
            patch(
                "backend.notifications.notifications.get_database_manager_async_client"
            ) as mock_db_client,
            patch(
                "backend.notifications.notifications.generate_unsubscribe_link"
            ) as mock_unsub_link,
        ):
        with patch("backend.notifications.notifications.logger") as mock_logger, patch(
            "backend.notifications.notifications.get_database_manager_async_client"
        ) as mock_db_client, patch(
            "backend.notifications.notifications.generate_unsubscribe_link"
        ) as mock_unsub_link:

            # Create batch of 5 notifications
            notifications = []
            for i in range(5):
@@ -513,15 +499,12 @@ class TestNotificationErrorHandling:
        self, notification_manager, sample_batch_event
    ):
        """Test successful batch processing where all notifications are sent without errors."""
        with (
            patch("backend.notifications.notifications.logger") as mock_logger,
            patch(
                "backend.notifications.notifications.get_database_manager_async_client"
            ) as mock_db_client,
            patch(
                "backend.notifications.notifications.generate_unsubscribe_link"
            ) as mock_unsub_link,
        ):
        with patch("backend.notifications.notifications.logger") as mock_logger, patch(
            "backend.notifications.notifications.get_database_manager_async_client"
        ) as mock_db_client, patch(
            "backend.notifications.notifications.generate_unsubscribe_link"
        ) as mock_unsub_link:

            # Create batch of 5 notifications
            notifications = []
            for i in range(5):

@@ -6,7 +6,7 @@ Usage: from backend.sdk import *

This module provides:
- All block base classes and types
- All credential and authentication components
- All credential and authentication components
- All cost tracking components
- All webhook components
- All utility functions

@@ -1,7 +1,7 @@
"""
Integration between SDK provider costs and the execution cost system.

This module provides the glue between provider-defined base costs and the
This module provides the glue between provider-defined base costs and the
BLOCK_COSTS configuration used by the execution system.
"""


@@ -91,6 +91,7 @@ class AutoRegistry:
            not hasattr(provider.webhook_manager, "PROVIDER_NAME")
            or provider.webhook_manager.PROVIDER_NAME is None
        ):

            # This works because ProviderName has _missing_ method
            provider.webhook_manager.PROVIDER_NAME = ProviderName(provider.name)
        cls._webhook_managers[provider.name] = provider.webhook_manager

@@ -168,9 +168,7 @@ def async_error_logged() -> Callable[
]: ...


def async_error_logged(
    *, swallow: bool = True
) -> (
def async_error_logged(*, swallow: bool = True) -> (
    Callable[
        [Callable[P, Coroutine[Any, Any, T]]],
        Callable[P, Coroutine[Any, Any, T | None]],

@@ -3,7 +3,7 @@ Utilities for handling dynamic field names and delimiters in the AutoGPT Platfor

Dynamic fields allow graphs to connect complex data structures using special delimiters:
- _#_ for dictionary keys (e.g., "values_#_name" → values["name"])
- _$_ for list indices (e.g., "items_$_0" → items[0])
- _$_ for list indices (e.g., "items_$_0" → items[0])
- _@_ for object attributes (e.g., "obj_@_attr" → obj.attr)

This module provides utilities for:

@@ -33,11 +33,14 @@ class TestFileCloudIntegration:
        cloud_path = "gcs://test-bucket/uploads/456/source.txt"
        cloud_content = b"cloud file content"

        with (
            patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter,
            patch("backend.util.file.scan_content_safe") as mock_scan,
            patch("backend.util.file.Path") as mock_path_class,
        ):
        with patch(
            "backend.util.file.get_cloud_storage_handler"
        ) as mock_handler_getter, patch(
            "backend.util.file.scan_content_safe"
        ) as mock_scan, patch(
            "backend.util.file.Path"
        ) as mock_path_class:

            # Mock cloud storage handler
            mock_handler = MagicMock()
            mock_handler.is_cloud_path.return_value = True
@@ -107,13 +110,18 @@ class TestFileCloudIntegration:
        cloud_path = "gcs://test-bucket/uploads/456/image.png"
        cloud_content = b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR"  # PNG header

        with (
            patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter,
            patch("backend.util.file.scan_content_safe") as mock_scan,
            patch("backend.util.file.get_mime_type") as mock_mime,
            patch("backend.util.file.base64.b64encode") as mock_b64,
            patch("backend.util.file.Path") as mock_path_class,
        ):
        with patch(
            "backend.util.file.get_cloud_storage_handler"
        ) as mock_handler_getter, patch(
            "backend.util.file.scan_content_safe"
        ) as mock_scan, patch(
            "backend.util.file.get_mime_type"
        ) as mock_mime, patch(
            "backend.util.file.base64.b64encode"
        ) as mock_b64, patch(
            "backend.util.file.Path"
        ) as mock_path_class:

            # Mock cloud storage handler
            mock_handler = MagicMock()
            mock_handler.is_cloud_path.return_value = True
@@ -161,13 +169,18 @@ class TestFileCloudIntegration:
        graph_exec_id = "test-exec-123"
        data_uri = "data:text/plain;base64,SGVsbG8gd29ybGQ="

        with (
            patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter,
            patch("backend.util.file.scan_content_safe") as mock_scan,
            patch("backend.util.file.base64.b64decode") as mock_b64decode,
            patch("backend.util.file.uuid.uuid4") as mock_uuid,
            patch("backend.util.file.Path") as mock_path_class,
        ):
        with patch(
            "backend.util.file.get_cloud_storage_handler"
        ) as mock_handler_getter, patch(
            "backend.util.file.scan_content_safe"
        ) as mock_scan, patch(
            "backend.util.file.base64.b64decode"
        ) as mock_b64decode, patch(
            "backend.util.file.uuid.uuid4"
        ) as mock_uuid, patch(
            "backend.util.file.Path"
        ) as mock_path_class:

            # Mock cloud storage handler
            mock_handler = MagicMock()
            mock_handler.is_cloud_path.return_value = False
@@ -217,6 +230,7 @@ class TestFileCloudIntegration:
        with patch(
            "backend.util.file.get_cloud_storage_handler"
        ) as mock_handler_getter:

            # Mock cloud storage handler to raise error
            mock_handler = AsyncMock()
            mock_handler.is_cloud_path.return_value = True
@@ -241,11 +255,14 @@ class TestFileCloudIntegration:
        local_file = "test_video.mp4"
        file_content = b"fake video content"

        with (
            patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter,
            patch("backend.util.file.scan_content_safe") as mock_scan,
            patch("backend.util.file.Path") as mock_path_class,
        ):
        with patch(
            "backend.util.file.get_cloud_storage_handler"
        ) as mock_handler_getter, patch(
            "backend.util.file.scan_content_safe"
        ) as mock_scan, patch(
            "backend.util.file.Path"
        ) as mock_path_class:

            # Mock cloud storage handler - not a cloud path
            mock_handler = MagicMock()
            mock_handler.is_cloud_path.return_value = False
@@ -290,11 +307,14 @@ class TestFileCloudIntegration:
        local_file = "infected.exe"
        file_content = b"malicious content"

        with (
            patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter,
            patch("backend.util.file.scan_content_safe") as mock_scan,
            patch("backend.util.file.Path") as mock_path_class,
        ):
        with patch(
            "backend.util.file.get_cloud_storage_handler"
        ) as mock_handler_getter, patch(
            "backend.util.file.scan_content_safe"
        ) as mock_scan, patch(
            "backend.util.file.Path"
        ) as mock_path_class:

            # Mock cloud storage handler - not a cloud path
            mock_handler = MagicMock()
            mock_handler.is_cloud_path.return_value = False

@@ -500,6 +500,7 @@ class Requests:
            json=json,
            **kwargs,
        ) as response:

            if self.raise_for_status:
                try:
                    response.raise_for_status()

@@ -558,6 +558,7 @@ def get_service_client(
            self._connection_failure_count >= 3
            and current_time - self._last_client_reset > 30
        ):

            logger.warning(
                f"Connection failures detected ({self._connection_failure_count}), recreating HTTP clients"
            )