From 211478bb29b6c6b725bfeb575bbb5ee9251f9055 Mon Sep 17 00:00:00 2001
From: Zamil Majdy
Date: Thu, 12 Feb 2026 08:25:22 +0400
Subject: [PATCH] Revert "style: run ruff format and isort"

This reverts commit 40b58807ab142a0024f853c52b2aba7aa03b7af3.
---
 .../backend/api/features/analytics_test.py | 12 +-
 .../backend/api/features/chat/model_test.py | 18 +-
 .../api/features/chat/sdk/security_hooks.py | 3 +-
 .../backend/api/features/chat/sdk/service.py | 4 +-
 .../api/features/chat/sdk/tool_adapter.py | 3 +-
 .../backend/api/features/chat/service.py | 12 +-
 .../api/features/chat/stream_registry.py | 4 +-
 .../api/features/chat/tools/run_block.py | 7 +-
 .../backend/api/features/library/db_test.py | 4 +-
 .../backend/api/features/postmark/postmark.py | 2 +-
 .../api/features/store/content_handlers.py | 6 +-
 .../backend/api/features/store/media_test.py | 6 +-
 .../backend/backend/api/features/v1_test.py | 40 +-
 .../backend/backend/api/test_helpers.py | 12 +-
 .../backend/api/utils/api_key_auth_test.py | 7 +-
 .../backend/backend/api/ws_api_test.py | 18 +-
 .../blocks/ai_image_generator_block.py | 4 +-
 .../backend/blocks/ai_music_generator.py | 4 +-
 .../backend/backend/blocks/airtable/_api.py | 30 +-
 .../backend/blocks/airtable/_api_test.py | 42 +-
 .../blocks/ayrshare/post_to_bluesky.py | 10 +-
 .../blocks/ayrshare/post_to_facebook.py | 5 +-
 .../backend/blocks/ayrshare/post_to_gmb.py | 10 +-
 .../blocks/ayrshare/post_to_instagram.py | 45 +-
 .../blocks/ayrshare/post_to_linkedin.py | 20 +-
 .../blocks/ayrshare/post_to_pinterest.py | 25 +-
 .../blocks/ayrshare/post_to_snapchat.py | 10 +-
 .../blocks/ayrshare/post_to_telegram.py | 5 +-
 .../blocks/ayrshare/post_to_threads.py | 20 +-
 .../backend/blocks/ayrshare/post_to_tiktok.py | 30 +-
 .../backend/blocks/ayrshare/post_to_x.py | 30 +-
 .../blocks/ayrshare/post_to_youtube.py | 40 +-
 .../backend/backend/blocks/basic.py | 11 +-
 .../backend/blocks/data_manipulation.py | 9 +-
 .../blocks/dataforseo/keyword_suggestions.py | 6 +-
 .../backend/backend/blocks/email_block.py | 78 +-
 .../backend/blocks/enrichlayer/linkedin.py | 4 +-
 .../backend/backend/blocks/exa/helpers.py | 4 +-
 .../backend/backend/blocks/exa/websets.py | 19 +-
 .../backend/blocks/exa/websets_polling.py | 14 +-
 .../backend/backend/blocks/flux_kontext.py | 4 +-
 .../backend/backend/blocks/github/issues.py | 4 +-
 .../backend/blocks/github/pull_requests.py | 12 +-
 .../backend/backend/blocks/google/docs.py | 24 +-
 .../backend/backend/blocks/google/gmail.py | 193 ++---
 .../backend/backend/blocks/google/sheets.py | 693 ++++++++----------
 .../backend/backend/blocks/ideogram.py | 8 +-
 autogpt_platform/backend/backend/blocks/io.py | 18 +-
 .../backend/backend/blocks/linear/_api.py | 1 +
 .../backend/backend/blocks/llm.py | 1 +
 .../backend/backend/blocks/persistence.py | 13 +-
 .../backend/backend/blocks/pinecone.py | 11 +-
 .../backend/backend/blocks/reddit.py | 11 +-
 .../backend/blocks/replicate/flux_advanced.py | 4 +-
 .../backend/backend/blocks/slant3d/base.py | 2 +-
 .../backend/blocks/smart_decision_maker.py | 10 +-
 .../backend/blocks/smartlead/campaign.py | 38 +-
 .../backend/blocks/stagehand/blocks.py | 6 +-
 .../backend/backend/blocks/test/test_block.py | 6 +-
 .../blocks/test/test_smart_decision_maker.py | 214 +++---
 .../test/test_smart_decision_maker_dict.py | 6 +-
 ...est_smart_decision_maker_dynamic_fields.py | 19 +-
 .../backend/backend/blocks/time_blocks.py | 61 +-
 .../backend/blocks/todoist/projects.py | 6 +-
 .../backend/blocks/todoist/sections.py | 2 +-
 .../backend/backend/blocks/todoist/tasks.py | 4 +-
.../backend/backend/blocks/twitter/_types.py | 1 + .../blocks/twitter/lists/manage_lists.py | 2 +- .../blocks/twitter/tweets/tweet_lookup.py | 1 + .../backend/backend/blocks/video/narration.py | 3 +- .../backend/backend/blocks/youtube.py | 4 +- .../blocks/zerobounce/validate_emails.py | 30 +- autogpt_platform/backend/backend/check_db.py | 6 +- .../backend/backend/cli/oauth_tool.py | 20 +- .../backend/backend/data/credit.py | 11 +- .../backend/data/credit_ceiling_test.py | 18 +- .../backend/data/credit_concurrency_test.py | 84 +-- .../backend/data/credit_refund_test.py | 42 +- .../backend/data/credit_underflow_test.py | 100 +-- .../credit_user_balance_migration_test.py | 42 +- .../backend/backend/data/graph.py | 7 +- .../backend/backend/data/model.py | 1 + .../backend/backend/data/workspace.py | 3 +- .../activity_status_generator_test.py | 153 ++-- .../backend/executor/automod/manager.py | 1 + .../manager_insufficient_funds_test.py | 114 +-- .../executor/manager_low_balance_test.py | 45 +- .../backend/backend/executor/manager_test.py | 26 +- .../backend/backend/executor/utils.py | 72 +- .../backend/integrations/credentials_store.py | 1 + .../backend/integrations/creds_manager.py | 3 +- .../notifications/test_notifications.py | 89 +-- .../backend/backend/sdk/__init__.py | 2 +- .../backend/backend/sdk/cost_integration.py | 2 +- .../backend/backend/sdk/registry.py | 1 + .../backend/backend/util/decorator.py | 4 +- .../backend/backend/util/dynamic_fields.py | 2 +- .../backend/backend/util/file_test.py | 78 +- .../backend/backend/util/request.py | 1 + .../backend/backend/util/service.py | 1 + .../backend/backend/util/service_test.py | 49 +- .../backend/backend/util/test_json.py | 80 +- .../scripts/test_generate_block_docs.py | 1 - .../agent_generator/test_core_integration.py | 99 ++- .../test/sdk/test_sdk_block_creation.py | 26 +- .../backend/test/sdk/test_sdk_webhooks.py | 5 +- .../backend/test_requeue_integration.py | 48 +- 107 files changed, 1403 insertions(+), 1879 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/analytics_test.py b/autogpt_platform/backend/backend/api/features/analytics_test.py index 727c5f4441..2493bdb7e4 100644 --- a/autogpt_platform/backend/backend/api/features/analytics_test.py +++ b/autogpt_platform/backend/backend/api/features/analytics_test.py @@ -154,9 +154,9 @@ def test_log_raw_metric_validation_errors( assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}" error_text = json.dumps(error_detail) - assert expected_error in error_text, ( - f"Expected '{expected_error}' in error response: {error_text}" - ) + assert ( + expected_error in error_text + ), f"Expected '{expected_error}' in error response: {error_text}" def test_log_raw_metric_service_error( @@ -310,9 +310,9 @@ def test_log_raw_analytics_validation_errors( assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}" error_text = json.dumps(error_detail) - assert expected_error in error_text, ( - f"Expected '{expected_error}' in error response: {error_text}" - ) + assert ( + expected_error in error_text + ), f"Expected '{expected_error}' in error response: {error_text}" def test_log_raw_analytics_service_error( diff --git a/autogpt_platform/backend/backend/api/features/chat/model_test.py b/autogpt_platform/backend/backend/api/features/chat/model_test.py index ea2e7adacb..c230b00f9c 100644 --- a/autogpt_platform/backend/backend/api/features/chat/model_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/model_test.py @@ -96,9 +96,9 @@ 
async def test_chatsession_db_storage(setup_test_user, test_user_id): ) assert s2 is not None, "Session not found after loading from DB" - assert len(s2.messages) == len(s.messages), ( - f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}" - ) + assert len(s2.messages) == len( + s.messages + ), f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}" # Verify all roles are present roles = [m.role for m in s2.messages] @@ -109,11 +109,11 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id): # Verify message content for orig, loaded in zip(s.messages, s2.messages): assert orig.role == loaded.role, f"Role mismatch: {orig.role} != {loaded.role}" - assert orig.content == loaded.content, ( - f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}" - ) + assert ( + orig.content == loaded.content + ), f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}" if orig.tool_calls: - assert loaded.tool_calls is not None, ( - f"Tool calls missing for {orig.role} message" - ) + assert ( + loaded.tool_calls is not None + ), f"Tool calls missing for {orig.role} message" assert len(orig.tool_calls) == len(loaded.tool_calls) diff --git a/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py b/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py index b0d746af74..4300f6effc 100644 --- a/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py +++ b/autogpt_platform/backend/backend/api/features/chat/sdk/security_hooks.py @@ -197,7 +197,8 @@ def _validate_bash_command( allowed = ", ".join(sorted(ALLOWED_BASH_COMMANDS)) logger.warning(f"Blocked Bash command: {cmd_name}") return _deny( - f"Command '{cmd_name}' is not allowed. Allowed commands: {allowed}" + f"Command '{cmd_name}' is not allowed. " + f"Allowed commands: {allowed}" ) expect_command = False diff --git a/autogpt_platform/backend/backend/api/features/chat/sdk/service.py b/autogpt_platform/backend/backend/api/features/chat/sdk/service.py index 19cb3ff99e..123657f828 100644 --- a/autogpt_platform/backend/backend/api/features/chat/sdk/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/sdk/service.py @@ -120,9 +120,7 @@ def _cleanup_sdk_tool_results(cwd: str) -> None: # Security check 3: Validate project_dir is under ~/.claude/projects project_dir = os.path.normpath(project_dir) if not project_dir.startswith(claude_projects): - logger.warning( - f"[SDK] Rejecting cleanup for escaped project path: {project_dir}" - ) + logger.warning(f"[SDK] Rejecting cleanup for escaped project path: {project_dir}") return results_dir = os.path.join(project_dir, "tool-results") diff --git a/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py b/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py index f9efb10996..c34350bb94 100644 --- a/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py +++ b/autogpt_platform/backend/backend/api/features/chat/sdk/tool_adapter.py @@ -37,8 +37,7 @@ _current_tool_call_id: ContextVar[str | None] = ContextVar( # Keyed by tool_name → full output string. Consumed (popped) by the # response adapter when it builds StreamToolOutputAvailable. 
_pending_tool_outputs: ContextVar[dict[str, str]] = ContextVar( - "pending_tool_outputs", - default=None, # type: ignore[arg-type] + "pending_tool_outputs", default=None # type: ignore[arg-type] ) diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index ffe6780291..986730cd5d 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -1184,13 +1184,13 @@ async def _stream_chat_chunks( tool_calls[idx]["id"] = tc_chunk.id if tc_chunk.function: if tc_chunk.function.name: - tool_calls[idx]["function"]["name"] = ( - tc_chunk.function.name - ) + tool_calls[idx]["function"][ + "name" + ] = tc_chunk.function.name if tc_chunk.function.arguments: - tool_calls[idx]["function"]["arguments"] += ( - tc_chunk.function.arguments - ) + tool_calls[idx]["function"][ + "arguments" + ] += tc_chunk.function.arguments # Emit StreamToolInputStart only after we have the tool call ID if ( diff --git a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py index 6776e6c4e7..671aefc7ba 100644 --- a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py +++ b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py @@ -569,7 +569,7 @@ async def _stream_listener( if isinstance(chunk, StreamFinish): total_time = (time.perf_counter() - start_time) * 1000 logger.info( - f"[TIMING] StreamFinish received in {total_time / 1000:.1f}s; delivered={messages_delivered}", + f"[TIMING] StreamFinish received in {total_time/1000:.1f}s; delivered={messages_delivered}", extra={ "json_fields": { **log_meta, @@ -620,7 +620,7 @@ async def _stream_listener( # Clean up listener task mapping on exit total_time = (time.perf_counter() - start_time) * 1000 logger.info( - f"[TIMING] _stream_listener FINISHED in {total_time / 1000:.1f}s; task={task_id}, " + f"[TIMING] _stream_listener FINISHED in {total_time/1000:.1f}s; task={task_id}, " f"delivered={messages_delivered}, xread_count={xread_count}", extra={ "json_fields": { diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 16a5064640..fc4a470fdd 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -151,10 +151,9 @@ class RunBlockTool(BaseTool): logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}") creds_manager = IntegrationCredentialsManager() - ( - matched_credentials, - missing_credentials, - ) = await self._resolve_block_credentials(user_id, block, input_data) + matched_credentials, missing_credentials = ( + await self._resolve_block_credentials(user_id, block, input_data) + ) if missing_credentials: # Return setup requirements response with missing credentials diff --git a/autogpt_platform/backend/backend/api/features/library/db_test.py b/autogpt_platform/backend/backend/api/features/library/db_test.py index 3ae0435fd4..6023177070 100644 --- a/autogpt_platform/backend/backend/api/features/library/db_test.py +++ b/autogpt_platform/backend/backend/api/features/library/db_test.py @@ -152,7 +152,9 @@ async def test_add_agent_to_library(mocker): # Mock graph_db.get_graph function that's called to check for HITL blocks mock_graph_db = 
mocker.patch("backend.api.features.library.db.graph_db") mock_graph_model = mocker.Mock() - mock_graph_model.nodes = [] # Empty list so _has_human_in_the_loop_blocks returns False + mock_graph_model.nodes = ( + [] + ) # Empty list so _has_human_in_the_loop_blocks returns False mock_graph_db.get_graph = mocker.AsyncMock(return_value=mock_graph_model) # Mock the model conversion diff --git a/autogpt_platform/backend/backend/api/features/postmark/postmark.py b/autogpt_platform/backend/backend/api/features/postmark/postmark.py index 5d3db26118..224e30fa9d 100644 --- a/autogpt_platform/backend/backend/api/features/postmark/postmark.py +++ b/autogpt_platform/backend/backend/api/features/postmark/postmark.py @@ -57,7 +57,7 @@ async def postmark_webhook_handler( webhook: Annotated[ PostmarkWebhook, Body(discriminator="RecordType"), - ], + ] ): logger.info(f"Received webhook from Postmark: {webhook}") match webhook: diff --git a/autogpt_platform/backend/backend/api/features/store/content_handlers.py b/autogpt_platform/backend/backend/api/features/store/content_handlers.py index 212076cac4..cbbdcfbebf 100644 --- a/autogpt_platform/backend/backend/api/features/store/content_handlers.py +++ b/autogpt_platform/backend/backend/api/features/store/content_handlers.py @@ -164,7 +164,7 @@ class BlockHandler(ContentHandler): block_ids = list(all_blocks.keys()) # Query for existing embeddings - placeholders = ",".join([f"${i + 1}" for i in range(len(block_ids))]) + placeholders = ",".join([f"${i+1}" for i in range(len(block_ids))]) existing_result = await query_raw_with_schema( f""" SELECT "contentId" @@ -265,7 +265,7 @@ class BlockHandler(ContentHandler): return {"total": 0, "with_embeddings": 0, "without_embeddings": 0} block_ids = enabled_block_ids - placeholders = ",".join([f"${i + 1}" for i in range(len(block_ids))]) + placeholders = ",".join([f"${i+1}" for i in range(len(block_ids))]) embedded_result = await query_raw_with_schema( f""" @@ -508,7 +508,7 @@ class DocumentationHandler(ContentHandler): ] # Check which ones have embeddings - placeholders = ",".join([f"${i + 1}" for i in range(len(section_content_ids))]) + placeholders = ",".join([f"${i+1}" for i in range(len(section_content_ids))]) existing_result = await query_raw_with_schema( f""" SELECT "contentId" diff --git a/autogpt_platform/backend/backend/api/features/store/media_test.py b/autogpt_platform/backend/backend/api/features/store/media_test.py index 264fc30774..7f3899c8a5 100644 --- a/autogpt_platform/backend/backend/api/features/store/media_test.py +++ b/autogpt_platform/backend/backend/api/features/store/media_test.py @@ -47,7 +47,7 @@ def mock_storage_client(mocker): async def test_upload_media_success(mock_settings, mock_storage_client): # Create test JPEG data with valid signature - test_data = b"\xff\xd8\xff" + b"test data" + test_data = b"\xFF\xD8\xFF" + b"test data" test_file = fastapi.UploadFile( filename="laptop.jpeg", @@ -85,7 +85,7 @@ async def test_upload_media_missing_credentials(monkeypatch): test_file = fastapi.UploadFile( filename="laptop.jpeg", - file=io.BytesIO(b"\xff\xd8\xff" + b"test data"), # Valid JPEG signature + file=io.BytesIO(b"\xFF\xD8\xFF" + b"test data"), # Valid JPEG signature headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}), ) @@ -110,7 +110,7 @@ async def test_upload_media_video_type(mock_settings, mock_storage_client): async def test_upload_media_file_too_large(mock_settings, mock_storage_client): - large_data = b"\xff\xd8\xff" + b"x" * ( + large_data = b"\xFF\xD8\xFF" + b"x" * ( 50 
* 1024 * 1024 + 1 ) # 50MB + 1 byte with valid JPEG signature test_file = fastapi.UploadFile( diff --git a/autogpt_platform/backend/backend/api/features/v1_test.py b/autogpt_platform/backend/backend/api/features/v1_test.py index dc76e4edd0..d57ad49949 100644 --- a/autogpt_platform/backend/backend/api/features/v1_test.py +++ b/autogpt_platform/backend/backend/api/features/v1_test.py @@ -499,12 +499,10 @@ async def test_upload_file_success(test_user_id: str): ) # Mock dependencies - with ( - patch("backend.api.features.v1.scan_content_safe") as mock_scan, - patch( - "backend.api.features.v1.get_cloud_storage_handler" - ) as mock_handler_getter, - ): + with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch( + "backend.api.features.v1.get_cloud_storage_handler" + ) as mock_handler_getter: + mock_scan.return_value = None mock_handler = AsyncMock() mock_handler.store_file.return_value = "gcs://test-bucket/uploads/123/test.txt" @@ -553,12 +551,10 @@ async def test_upload_file_no_filename(test_user_id: str): ), ) - with ( - patch("backend.api.features.v1.scan_content_safe") as mock_scan, - patch( - "backend.api.features.v1.get_cloud_storage_handler" - ) as mock_handler_getter, - ): + with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch( + "backend.api.features.v1.get_cloud_storage_handler" + ) as mock_handler_getter: + mock_scan.return_value = None mock_handler = AsyncMock() mock_handler.store_file.return_value = ( @@ -636,12 +632,10 @@ async def test_upload_file_cloud_storage_failure(test_user_id: str): headers=starlette.datastructures.Headers({"content-type": "text/plain"}), ) - with ( - patch("backend.api.features.v1.scan_content_safe") as mock_scan, - patch( - "backend.api.features.v1.get_cloud_storage_handler" - ) as mock_handler_getter, - ): + with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch( + "backend.api.features.v1.get_cloud_storage_handler" + ) as mock_handler_getter: + mock_scan.return_value = None mock_handler = AsyncMock() mock_handler.store_file.side_effect = RuntimeError("Storage error!") @@ -685,12 +679,10 @@ async def test_upload_file_gcs_not_configured_fallback(test_user_id: str): headers=starlette.datastructures.Headers({"content-type": "text/plain"}), ) - with ( - patch("backend.api.features.v1.scan_content_safe") as mock_scan, - patch( - "backend.api.features.v1.get_cloud_storage_handler" - ) as mock_handler_getter, - ): + with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch( + "backend.api.features.v1.get_cloud_storage_handler" + ) as mock_handler_getter: + mock_scan.return_value = None mock_handler = AsyncMock() mock_handler.config.gcs_bucket_name = "" # Simulate no GCS bucket configured diff --git a/autogpt_platform/backend/backend/api/test_helpers.py b/autogpt_platform/backend/backend/api/test_helpers.py index 9254257d98..c6ba333a2e 100644 --- a/autogpt_platform/backend/backend/api/test_helpers.py +++ b/autogpt_platform/backend/backend/api/test_helpers.py @@ -102,12 +102,12 @@ def assert_mock_called_with_partial(mock_obj: Any, **expected_kwargs: Any) -> No actual_kwargs = mock_obj.call_args.kwargs if mock_obj.call_args else {} for key, expected_value in expected_kwargs.items(): - assert key in actual_kwargs, ( - f"Missing key '{key}' in mock call. 
Actual keys: {list(actual_kwargs.keys())}" - ) - assert actual_kwargs[key] == expected_value, ( - f"Mock called with {key}={actual_kwargs[key]}, expected {expected_value}" - ) + assert ( + key in actual_kwargs + ), f"Missing key '{key}' in mock call. Actual keys: {list(actual_kwargs.keys())}" + assert ( + actual_kwargs[key] == expected_value + ), f"Mock called with {key}={actual_kwargs[key]}, expected {expected_value}" @contextmanager diff --git a/autogpt_platform/backend/backend/api/utils/api_key_auth_test.py b/autogpt_platform/backend/backend/api/utils/api_key_auth_test.py index 2b85e1e7e3..39c3150561 100644 --- a/autogpt_platform/backend/backend/api/utils/api_key_auth_test.py +++ b/autogpt_platform/backend/backend/api/utils/api_key_auth_test.py @@ -457,8 +457,7 @@ async def test_api_key_with_unicode_characters_normalization_attack(mock_request """Test that Unicode normalization doesn't bypass validation.""" # Create auth with composed Unicode character auth = APIKeyAuthenticator( - header_name="X-API-Key", - expected_token="café", # é is composed + header_name="X-API-Key", expected_token="café" # é is composed ) # Try with decomposed version (c + a + f + e + ´) @@ -523,8 +522,8 @@ async def test_api_keys_with_newline_variations(mock_request): "valid\r\ntoken", # Windows newline "valid\rtoken", # Mac newline "valid\x85token", # NEL (Next Line) - "valid\x0btoken", # Vertical Tab - "valid\x0ctoken", # Form Feed + "valid\x0Btoken", # Vertical Tab + "valid\x0Ctoken", # Form Feed ] for api_key in newline_variations: diff --git a/autogpt_platform/backend/backend/api/ws_api_test.py b/autogpt_platform/backend/backend/api/ws_api_test.py index c06cf09376..edab1bbded 100644 --- a/autogpt_platform/backend/backend/api/ws_api_test.py +++ b/autogpt_platform/backend/backend/api/ws_api_test.py @@ -44,12 +44,9 @@ def test_websocket_server_uses_cors_helper(mocker) -> None: "backend.api.ws_api.build_cors_params", return_value=cors_params ) - with ( - override_config( - settings, "backend_cors_allow_origins", cors_params["allow_origins"] - ), - override_config(settings, "app_env", AppEnvironment.LOCAL), - ): + with override_config( + settings, "backend_cors_allow_origins", cors_params["allow_origins"] + ), override_config(settings, "app_env", AppEnvironment.LOCAL): WebsocketServer().run() build_cors.assert_called_once_with( @@ -68,12 +65,9 @@ def test_websocket_server_uses_cors_helper(mocker) -> None: def test_websocket_server_blocks_localhost_in_production(mocker) -> None: mocker.patch("backend.api.ws_api.uvicorn.run") - with ( - override_config( - settings, "backend_cors_allow_origins", ["http://localhost:3000"] - ), - override_config(settings, "app_env", AppEnvironment.PRODUCTION), - ): + with override_config( + settings, "backend_cors_allow_origins", ["http://localhost:3000"] + ), override_config(settings, "app_env", AppEnvironment.PRODUCTION): with pytest.raises(ValueError): WebsocketServer().run() diff --git a/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py index 3b6adfa9cd..e40731cd97 100644 --- a/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py +++ b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py @@ -174,9 +174,7 @@ class AIImageGeneratorBlock(Block): ], test_mock={ # Return a data URI directly so store_media_file doesn't need to download - "_run_client": lambda *args, **kwargs: ( - "data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAQAcJYgCdAEO" - ) + 
"_run_client": lambda *args, **kwargs: "data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAQAcJYgCdAEO" }, ) diff --git a/autogpt_platform/backend/backend/blocks/ai_music_generator.py b/autogpt_platform/backend/backend/blocks/ai_music_generator.py index b8755f777d..1ecb78f95e 100644 --- a/autogpt_platform/backend/backend/blocks/ai_music_generator.py +++ b/autogpt_platform/backend/backend/blocks/ai_music_generator.py @@ -142,9 +142,7 @@ class AIMusicGeneratorBlock(Block): ), ], test_mock={ - "run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: ( - "https://replicate.com/output/generated-audio-url.wav" - ), + "run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: "https://replicate.com/output/generated-audio-url.wav", }, test_credentials=TEST_CREDENTIALS, ) diff --git a/autogpt_platform/backend/backend/blocks/airtable/_api.py b/autogpt_platform/backend/backend/blocks/airtable/_api.py index 5a4424e1fb..53ace72d98 100644 --- a/autogpt_platform/backend/backend/blocks/airtable/_api.py +++ b/autogpt_platform/backend/backend/blocks/airtable/_api.py @@ -556,9 +556,9 @@ async def create_table( ) -> dict: for field in table_fields: assert field.get("name"), "Field name is required" - assert field.get("type") in TABLE_FIELD_TYPES, ( - f"Field type {field.get('type')} is not valid. Valid types are {TABLE_FIELD_TYPES}." - ) + assert ( + field.get("type") in TABLE_FIELD_TYPES + ), f"Field type {field.get('type')} is not valid. Valid types are {TABLE_FIELD_TYPES}." # Note fields have differnet options for different types we are not currently validating them response = await Requests().post( @@ -582,9 +582,9 @@ async def update_table( date_dependency: dict | None = None, ) -> dict: - assert table_name or table_description or date_dependency, ( - "At least one of table_name, table_description, or date_dependency must be provided" - ) + assert ( + table_name or table_description or date_dependency + ), "At least one of table_name, table_description, or date_dependency must be provided" params: dict[str, str | dict[str, str]] = {} if table_name: @@ -613,9 +613,9 @@ async def create_field( options: dict[str, str] | None = None, ) -> dict[str, str | dict[str, str]]: - assert field_type in TABLE_FIELD_TYPES, ( - f"Field type {field_type} is not valid. Valid types are {TABLE_FIELD_TYPES}." - ) + assert ( + field_type in TABLE_FIELD_TYPES + ), f"Field type {field_type} is not valid. Valid types are {TABLE_FIELD_TYPES}." 
params: dict[str, str | dict[str, str]] = {} params["type"] = field_type params["name"] = name @@ -928,9 +928,9 @@ async def update_record( typecast: bool | None = None, fields: dict[str, Any] | None = None, ) -> dict[str, dict[str, dict[str, str]]]: - params: dict[ - str, str | bool | dict[str, Any] | list[dict[str, dict[str, str]]] - ] = {} + params: dict[str, str | bool | dict[str, Any] | list[dict[str, dict[str, str]]]] = ( + {} + ) if return_fields_by_field_id: params["returnFieldsByFieldId"] = return_fields_by_field_id if typecast: @@ -958,9 +958,9 @@ async def create_record( assert fields or records, "At least one of fields or records must be provided" assert not (fields and records), "Only one of fields or records can be provided" if records is not None: - assert len(records) <= 10, ( - "Only up to 10 records can be provided when using records" - ) + assert ( + len(records) <= 10 + ), "Only up to 10 records can be provided when using records" params: dict[str, str | bool | dict[str, Any] | list[dict[str, Any]]] = {} if fields: diff --git a/autogpt_platform/backend/backend/blocks/airtable/_api_test.py b/autogpt_platform/backend/backend/blocks/airtable/_api_test.py index 16fe27d3b9..02f15a509f 100644 --- a/autogpt_platform/backend/backend/blocks/airtable/_api_test.py +++ b/autogpt_platform/backend/backend/blocks/airtable/_api_test.py @@ -43,9 +43,9 @@ async def test_create_update_table(): workspace_id = "wsphuHmfllg7V3Brd" response = await create_base(credentials, workspace_id, "API Testing Base") assert response is not None, f"Checking create base response: {response}" - assert response.get("id") is not None, ( - f"Checking create base response id: {response}" - ) + assert ( + response.get("id") is not None + ), f"Checking create base response id: {response}" base_id = response.get("id") assert base_id is not None, f"Checking create base response id: {base_id}" @@ -236,9 +236,9 @@ async def test_record_management(): updated_records = response.get("records") assert updated_records is not None assert len(updated_records) == 2, f"Updated records: {updated_records}" - assert isinstance(updated_records, list), ( - f"Type of updated records: {type(updated_records)}" - ) + assert isinstance( + updated_records, list + ), f"Type of updated records: {type(updated_records)}" first_updated = updated_records[0] # type: ignore second_updated = updated_records[1] # type: ignore first_updated_fields = first_updated.get("fields") @@ -257,9 +257,9 @@ async def test_record_management(): deleted_records = response.get("records") assert deleted_records is not None assert len(deleted_records) == 2, f"Deleted records: {deleted_records}" - assert isinstance(deleted_records, list), ( - f"Type of deleted records: {type(deleted_records)}" - ) + assert isinstance( + deleted_records, list + ), f"Type of deleted records: {type(deleted_records)}" first_deleted = deleted_records[0] # type: ignore second_deleted = deleted_records[1] # type: ignore assert first_deleted.get("deleted") @@ -293,12 +293,12 @@ async def test_webhook_management(): ) response = await create_webhook(credentials, base_id, webhook_specification) assert response is not None, f"Checking create webhook response: {response}" - assert response.get("id") is not None, ( - f"Checking create webhook response id: {response}" - ) - assert response.get("macSecretBase64") is not None, ( - f"Checking create webhook response macSecretBase64: {response}" - ) + assert ( + response.get("id") is not None + ), f"Checking create webhook response id: {response}" + 
assert ( + response.get("macSecretBase64") is not None + ), f"Checking create webhook response macSecretBase64: {response}" webhook_id = response.get("id") assert webhook_id is not None, f"Webhook ID: {webhook_id}" @@ -308,14 +308,14 @@ async def test_webhook_management(): credentials, base_id, table_id, fields={"test_field": "test_value"} ) assert response is not None, f"Checking create record response: {response}" - assert response.get("id") is not None, ( - f"Checking create record response id: {response}" - ) + assert ( + response.get("id") is not None + ), f"Checking create record response id: {response}" fields = response.get("fields") assert fields is not None, f"Checking create record response fields: {response}" - assert fields.get("test_field") == "test_value", ( - f"Checking create record response fields test_field: {response}" - ) + assert ( + fields.get("test_field") == "test_value" + ), f"Checking create record response fields test_field: {response}" response = await list_webhook_payloads(credentials, base_id, webhook_id) assert response is not None, f"Checking list webhook payloads response: {response}" diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_bluesky.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_bluesky.py index 031d54dfbb..df0d5ad269 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_bluesky.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_bluesky.py @@ -69,18 +69,12 @@ class PostToBlueskyBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." return # Validate character limit for Bluesky if len(input_data.post) > 300: - yield ( - "error", - f"Post text exceeds Bluesky's 300 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"Post text exceeds Bluesky's 300 character limit ({len(input_data.post)} characters)" return # Validate media constraints for Bluesky diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_facebook.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_facebook.py index 66fe5cc70d..a9087915e6 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_facebook.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_facebook.py @@ -131,10 +131,7 @@ class PostToFacebookBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." return # Convert datetime to ISO format if provided diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_gmb.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_gmb.py index eaa878f90e..1f223f1f80 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_gmb.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_gmb.py @@ -120,18 +120,12 @@ class PostToGMBBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." 
return # Validate GMB constraints if len(input_data.media_urls) > 1: - yield ( - "error", - "Google My Business supports only one image or video per post", - ) + yield "error", "Google My Business supports only one image or video per post" return # Validate offer coupon code length diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_instagram.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_instagram.py index 5399827467..06d80db528 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_instagram.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_instagram.py @@ -123,25 +123,16 @@ class PostToInstagramBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." return # Validate Instagram constraints if len(input_data.post) > 2200: - yield ( - "error", - f"Instagram post text exceeds 2,200 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"Instagram post text exceeds 2,200 character limit ({len(input_data.post)} characters)" return if len(input_data.media_urls) > 10: - yield ( - "error", - "Instagram supports a maximum of 10 images/videos in a carousel", - ) + yield "error", "Instagram supports a maximum of 10 images/videos in a carousel" return if len(input_data.collaborators) > 3: @@ -156,10 +147,7 @@ class PostToInstagramBlock(Block): ] if any(reel_options) and not all(reel_options): - yield ( - "error", - "When posting a reel, all reel options must be set: share_reels_feed, audio_name, and either thumbnail or thumbnail_offset", - ) + yield "error", "When posting a reel, all reel options must be set: share_reels_feed, audio_name, and either thumbnail or thumbnail_offset" return # Count hashtags and mentions @@ -167,17 +155,11 @@ class PostToInstagramBlock(Block): mention_count = input_data.post.count("@") if hashtag_count > 30: - yield ( - "error", - f"Instagram allows maximum 30 hashtags ({hashtag_count} found)", - ) + yield "error", f"Instagram allows maximum 30 hashtags ({hashtag_count} found)" return if mention_count > 3: - yield ( - "error", - f"Instagram allows maximum 3 @mentions ({mention_count} found)", - ) + yield "error", f"Instagram allows maximum 3 @mentions ({mention_count} found)" return # Convert datetime to ISO format if provided @@ -209,10 +191,7 @@ class PostToInstagramBlock(Block): # Validate alt text length for i, alt in enumerate(input_data.alt_text): if len(alt) > 1000: - yield ( - "error", - f"Alt text {i + 1} exceeds 1,000 character limit ({len(alt)} characters)", - ) + yield "error", f"Alt text {i+1} exceeds 1,000 character limit ({len(alt)} characters)" return instagram_options["altText"] = input_data.alt_text @@ -227,19 +206,13 @@ class PostToInstagramBlock(Block): try: tag_obj = InstagramUserTag(**tag) except Exception as e: - yield ( - "error", - f"Invalid user tag: {e}, tages need to be a dictionary with a 3 items: username (str), x (float) and y (float)", - ) + yield "error", f"Invalid user tag: {e}, tages need to be a dictionary with a 3 items: username (str), x (float) and y (float)" return tag_dict: dict[str, float | str] = {"username": tag_obj.username} if tag_obj.x is not None and tag_obj.y is not None: # Validate coordinates if not (0.0 <= tag_obj.x <= 1.0) or not (0.0 <= tag_obj.y <= 1.0): - yield ( - "error", - f"User tag coordinates must be between 0.0 and 1.0 
(user: {tag_obj.username})", - ) + yield "error", f"User tag coordinates must be between 0.0 and 1.0 (user: {tag_obj.username})" return tag_dict["x"] = tag_obj.x tag_dict["y"] = tag_obj.y diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_linkedin.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_linkedin.py index a549a615f8..961587d201 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_linkedin.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_linkedin.py @@ -123,18 +123,12 @@ class PostToLinkedInBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." return # Validate LinkedIn constraints if len(input_data.post) > 3000: - yield ( - "error", - f"LinkedIn post text exceeds 3,000 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"LinkedIn post text exceeds 3,000 character limit ({len(input_data.post)} characters)" return if len(input_data.media_urls) > 9: @@ -142,19 +136,13 @@ class PostToLinkedInBlock(Block): return if input_data.document_title and len(input_data.document_title) > 400: - yield ( - "error", - f"LinkedIn document title exceeds 400 character limit ({len(input_data.document_title)} characters)", - ) + yield "error", f"LinkedIn document title exceeds 400 character limit ({len(input_data.document_title)} characters)" return # Validate visibility option valid_visibility = ["public", "connections", "loggedin"] if input_data.visibility not in valid_visibility: - yield ( - "error", - f"LinkedIn visibility must be one of: {', '.join(valid_visibility)}", - ) + yield "error", f"LinkedIn visibility must be one of: {', '.join(valid_visibility)}" return # Check for document extensions diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_pinterest.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_pinterest.py index 2492b8a509..834cd4e301 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_pinterest.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_pinterest.py @@ -103,32 +103,20 @@ class PostToPinterestBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." 
return # Validate Pinterest constraints if len(input_data.post) > 500: - yield ( - "error", - f"Pinterest pin description exceeds 500 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"Pinterest pin description exceeds 500 character limit ({len(input_data.post)} characters)" return if len(input_data.pin_title) > 100: - yield ( - "error", - f"Pinterest pin title exceeds 100 character limit ({len(input_data.pin_title)} characters)", - ) + yield "error", f"Pinterest pin title exceeds 100 character limit ({len(input_data.pin_title)} characters)" return if len(input_data.link) > 2048: - yield ( - "error", - f"Pinterest link URL exceeds 2048 character limit ({len(input_data.link)} characters)", - ) + yield "error", f"Pinterest link URL exceeds 2048 character limit ({len(input_data.link)} characters)" return if len(input_data.media_urls) == 0: @@ -153,10 +141,7 @@ class PostToPinterestBlock(Block): # Validate alt text length for i, alt in enumerate(input_data.alt_text): if len(alt) > 500: - yield ( - "error", - f"Pinterest alt text {i + 1} exceeds 500 character limit ({len(alt)} characters)", - ) + yield "error", f"Pinterest alt text {i+1} exceeds 500 character limit ({len(alt)} characters)" return # Convert datetime to ISO format if provided diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_snapchat.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_snapchat.py index 39486f5f93..3645f7cc9b 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_snapchat.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_snapchat.py @@ -73,10 +73,7 @@ class PostToSnapchatBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." return # Validate Snapchat constraints @@ -91,10 +88,7 @@ class PostToSnapchatBlock(Block): # Validate story type valid_story_types = ["story", "saved_story", "spotlight"] if input_data.story_type not in valid_story_types: - yield ( - "error", - f"Snapchat story type must be one of: {', '.join(valid_story_types)}", - ) + yield "error", f"Snapchat story type must be one of: {', '.join(valid_story_types)}" return # Convert datetime to ISO format if provided diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_telegram.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_telegram.py index 9043900c64..a220cbe9e8 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_telegram.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_telegram.py @@ -68,10 +68,7 @@ class PostToTelegramBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." 
return # Validate Telegram constraints diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_threads.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_threads.py index fd4449c18e..75983b2d13 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_threads.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_threads.py @@ -61,34 +61,22 @@ class PostToThreadsBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." return # Validate Threads constraints if len(input_data.post) > 500: - yield ( - "error", - f"Threads post text exceeds 500 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"Threads post text exceeds 500 character limit ({len(input_data.post)} characters)" return if len(input_data.media_urls) > 20: - yield ( - "error", - "Threads supports a maximum of 20 images/videos in a carousel", - ) + yield "error", "Threads supports a maximum of 20 images/videos in a carousel" return # Count hashtags (only 1 allowed) hashtag_count = input_data.post.count("#") if hashtag_count > 1: - yield ( - "error", - f"Threads allows only 1 hashtag per post ({hashtag_count} found)", - ) + yield "error", f"Threads allows only 1 hashtag per post ({hashtag_count} found)" return # Convert datetime to ISO format if provided diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_tiktok.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_tiktok.py index a762afbfb0..2d68f10ff0 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_tiktok.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_tiktok.py @@ -123,25 +123,16 @@ class PostToTikTokBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." 
return # Validate TikTok constraints if len(input_data.post) > 2200: - yield ( - "error", - f"TikTok post text exceeds 2,200 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"TikTok post text exceeds 2,200 character limit ({len(input_data.post)} characters)" return if not input_data.media_urls: - yield ( - "error", - "TikTok requires at least one media URL (either 1 video or up to 35 images)", - ) + yield "error", "TikTok requires at least one media URL (either 1 video or up to 35 images)" return # Check for video vs image constraints @@ -159,10 +150,7 @@ class PostToTikTokBlock(Block): ) if has_video and has_images: - yield ( - "error", - "TikTok does not support mixing video and images in the same post", - ) + yield "error", "TikTok does not support mixing video and images in the same post" return if has_video and len(input_data.media_urls) > 1: @@ -175,19 +163,13 @@ class PostToTikTokBlock(Block): # Validate image cover index if has_images and input_data.image_cover_index >= len(input_data.media_urls): - yield ( - "error", - f"Image cover index {input_data.image_cover_index} is out of range (max: {len(input_data.media_urls) - 1})", - ) + yield "error", f"Image cover index {input_data.image_cover_index} is out of range (max: {len(input_data.media_urls) - 1})" return # Check for PNG files (not supported) has_png = any(url.lower().endswith(".png") for url in input_data.media_urls) if has_png: - yield ( - "error", - "TikTok does not support PNG files. Please use JPG, JPEG, or WEBP for images.", - ) + yield "error", "TikTok does not support PNG files. Please use JPG, JPEG, or WEBP for images." return # Convert datetime to ISO format if provided diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_x.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_x.py index 91236bcb76..bbecd31ed4 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_x.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_x.py @@ -126,25 +126,16 @@ class PostToXBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." return # Validate X constraints if not input_data.long_post and len(input_data.post) > 280: - yield ( - "error", - f"X post text exceeds 280 character limit ({len(input_data.post)} characters). Enable 'long_post' for Premium accounts.", - ) + yield "error", f"X post text exceeds 280 character limit ({len(input_data.post)} characters). Enable 'long_post' for Premium accounts." 
return if input_data.long_post and len(input_data.post) > 25000: - yield ( - "error", - f"X long post text exceeds 25,000 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"X long post text exceeds 25,000 character limit ({len(input_data.post)} characters)" return if len(input_data.media_urls) > 4: @@ -158,20 +149,14 @@ class PostToXBlock(Block): return if input_data.poll_duration < 1 or input_data.poll_duration > 10080: - yield ( - "error", - "X poll duration must be between 1 and 10,080 minutes (7 days)", - ) + yield "error", "X poll duration must be between 1 and 10,080 minutes (7 days)" return # Validate alt text if input_data.alt_text: for i, alt in enumerate(input_data.alt_text): if len(alt) > 1000: - yield ( - "error", - f"X alt text {i + 1} exceeds 1,000 character limit ({len(alt)} characters)", - ) + yield "error", f"X alt text {i+1} exceeds 1,000 character limit ({len(alt)} characters)" return # Validate subtitle settings @@ -183,10 +168,7 @@ class PostToXBlock(Block): return if len(input_data.subtitle_name) > 150: - yield ( - "error", - f"Subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)", - ) + yield "error", f"Subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)" return # Convert datetime to ISO format if provided diff --git a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_youtube.py b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_youtube.py index e4c6e61742..8a366ba5c5 100644 --- a/autogpt_platform/backend/backend/blocks/ayrshare/post_to_youtube.py +++ b/autogpt_platform/backend/backend/blocks/ayrshare/post_to_youtube.py @@ -149,10 +149,7 @@ class PostToYouTubeBlock(Block): client = create_ayrshare_client() if not client: - yield ( - "error", - "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY.", - ) + yield "error", "Ayrshare integration is not configured. Please set up the AYRSHARE_API_KEY." 
return # Validate YouTube constraints @@ -161,17 +158,11 @@ class PostToYouTubeBlock(Block): return if len(input_data.title) > 100: - yield ( - "error", - f"YouTube title exceeds 100 character limit ({len(input_data.title)} characters)", - ) + yield "error", f"YouTube title exceeds 100 character limit ({len(input_data.title)} characters)" return if len(input_data.post) > 5000: - yield ( - "error", - f"YouTube description exceeds 5,000 character limit ({len(input_data.post)} characters)", - ) + yield "error", f"YouTube description exceeds 5,000 character limit ({len(input_data.post)} characters)" return # Check for forbidden characters @@ -195,10 +186,7 @@ class PostToYouTubeBlock(Block): # Validate visibility option valid_visibility = ["private", "public", "unlisted"] if input_data.visibility not in valid_visibility: - yield ( - "error", - f"YouTube visibility must be one of: {', '.join(valid_visibility)}", - ) + yield "error", f"YouTube visibility must be one of: {', '.join(valid_visibility)}" return # Validate thumbnail URL format @@ -214,18 +202,12 @@ class PostToYouTubeBlock(Block): if input_data.tags: total_tag_length = sum(len(tag) for tag in input_data.tags) if total_tag_length > 500: - yield ( - "error", - f"YouTube tags total length exceeds 500 characters ({total_tag_length} characters)", - ) + yield "error", f"YouTube tags total length exceeds 500 characters ({total_tag_length} characters)" return for tag in input_data.tags: if len(tag) < 2: - yield ( - "error", - f"YouTube tag '{tag}' is too short (minimum 2 characters)", - ) + yield "error", f"YouTube tag '{tag}' is too short (minimum 2 characters)" return # Validate subtitle URL @@ -243,18 +225,12 @@ class PostToYouTubeBlock(Block): return if input_data.subtitle_name and len(input_data.subtitle_name) > 150: - yield ( - "error", - f"YouTube subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)", - ) + yield "error", f"YouTube subtitle name exceeds 150 character limit ({len(input_data.subtitle_name)} characters)" return # Validate publish_at format if provided if input_data.publish_at and input_data.schedule_date: - yield ( - "error", - "Cannot use both 'publish_at' and 'schedule_date'. Use 'publish_at' for YouTube-controlled publishing.", - ) + yield "error", "Cannot use both 'publish_at' and 'schedule_date'. Use 'publish_at' for YouTube-controlled publishing." 
return # Convert datetime to ISO format if provided (only if not using publish_at) diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py index fa511412cf..95193b3feb 100644 --- a/autogpt_platform/backend/backend/blocks/basic.py +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -59,13 +59,10 @@ class FileStoreBlock(Block): # for_block_output: smart format - workspace:// in CoPilot, data URI in graphs return_format = "for_external_api" if input_data.base_64 else "for_block_output" - yield ( - "file_out", - await store_media_file( - file=input_data.file_in, - execution_context=execution_context, - return_format=return_format, - ), + yield "file_out", await store_media_file( + file=input_data.file_in, + execution_context=execution_context, + return_format=return_format, ) diff --git a/autogpt_platform/backend/backend/blocks/data_manipulation.py b/autogpt_platform/backend/backend/blocks/data_manipulation.py index dc4eb2f040..1014236b8c 100644 --- a/autogpt_platform/backend/backend/blocks/data_manipulation.py +++ b/autogpt_platform/backend/backend/blocks/data_manipulation.py @@ -728,12 +728,9 @@ class ConcatenateListsBlock(Block): # Type validation: each item must be a list # Strings are iterable and would cause extend() to iterate character-by-character # Non-iterable types would raise TypeError - yield ( - "error", - ( - f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. " - f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])." - ), + yield "error", ( + f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. " + f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])." ) return concatenated.extend(lst) diff --git a/autogpt_platform/backend/backend/blocks/dataforseo/keyword_suggestions.py b/autogpt_platform/backend/backend/blocks/dataforseo/keyword_suggestions.py index 96d65c1514..a1ecc86386 100644 --- a/autogpt_platform/backend/backend/blocks/dataforseo/keyword_suggestions.py +++ b/autogpt_platform/backend/backend/blocks/dataforseo/keyword_suggestions.py @@ -110,10 +110,8 @@ class DataForSeoKeywordSuggestionsBlock(Block): test_output=[ ( "suggestion", - lambda x: ( - hasattr(x, "keyword") - and x.keyword == "digital marketing strategy" - ), + lambda x: hasattr(x, "keyword") + and x.keyword == "digital marketing strategy", ), ("suggestions", lambda x: isinstance(x, list) and len(x) == 1), ("total_count", 1), diff --git a/autogpt_platform/backend/backend/blocks/email_block.py b/autogpt_platform/backend/backend/blocks/email_block.py index d1103ad137..fad2f411cb 100644 --- a/autogpt_platform/backend/backend/blocks/email_block.py +++ b/autogpt_platform/backend/backend/blocks/email_block.py @@ -137,71 +137,47 @@ class SendEmailBlock(Block): ) yield "status", status except socket.gaierror: - yield ( - "error", - ( - f"Cannot connect to SMTP server '{input_data.config.smtp_server}'. " - "Please verify the server address is correct." - ), + yield "error", ( + f"Cannot connect to SMTP server '{input_data.config.smtp_server}'. " + "Please verify the server address is correct." ) except socket.timeout: - yield ( - "error", - ( - f"Connection timeout to '{input_data.config.smtp_server}' " - f"on port {input_data.config.smtp_port}. " - "The server may be down or unreachable." - ), + yield "error", ( + f"Connection timeout to '{input_data.config.smtp_server}' " + f"on port {input_data.config.smtp_port}. " + "The server may be down or unreachable." 
) except ConnectionRefusedError: - yield ( - "error", - ( - f"Connection refused to '{input_data.config.smtp_server}' " - f"on port {input_data.config.smtp_port}. " - "Common SMTP ports are: 587 (TLS), 465 (SSL), 25 (plain). " - "Please verify the port is correct." - ), + yield "error", ( + f"Connection refused to '{input_data.config.smtp_server}' " + f"on port {input_data.config.smtp_port}. " + "Common SMTP ports are: 587 (TLS), 465 (SSL), 25 (plain). " + "Please verify the port is correct." ) except smtplib.SMTPNotSupportedError: - yield ( - "error", - ( - f"STARTTLS not supported by server '{input_data.config.smtp_server}'. " - "Try using port 465 for SSL or port 25 for unencrypted connection." - ), + yield "error", ( + f"STARTTLS not supported by server '{input_data.config.smtp_server}'. " + "Try using port 465 for SSL or port 25 for unencrypted connection." ) except ssl.SSLError as e: - yield ( - "error", - ( - f"SSL/TLS error when connecting to '{input_data.config.smtp_server}': {str(e)}. " - "The server may require a different security protocol." - ), + yield "error", ( + f"SSL/TLS error when connecting to '{input_data.config.smtp_server}': {str(e)}. " + "The server may require a different security protocol." ) except smtplib.SMTPAuthenticationError: - yield ( - "error", - ( - "Authentication failed. Please verify your username and password are correct." - ), + yield "error", ( + "Authentication failed. Please verify your username and password are correct." ) except smtplib.SMTPRecipientsRefused: - yield ( - "error", - ( - f"Recipient email address '{input_data.to_email}' was rejected by the server. " - "Please verify the email address is valid." - ), + yield "error", ( + f"Recipient email address '{input_data.to_email}' was rejected by the server. " + "Please verify the email address is valid." ) except smtplib.SMTPSenderRefused: - yield ( - "error", - ( - "Sender email address defined in the credentials that where used" - "was rejected by the server. " - "Please verify your account is authorized to send emails." - ), + yield "error", ( + "Sender email address defined in the credentials that where used" + "was rejected by the server. " + "Please verify your account is authorized to send emails." 
) except smtplib.SMTPDataError as e: yield "error", f"Email data rejected by server: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py b/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py index ecfc4bd721..974ad28eed 100644 --- a/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py +++ b/autogpt_platform/backend/backend/blocks/enrichlayer/linkedin.py @@ -490,9 +490,7 @@ class GetLinkedinProfilePictureBlock(Block): ], test_credentials=TEST_CREDENTIALS, test_mock={ - "_get_profile_picture": lambda *args, **kwargs: ( - "https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk" - ), + "_get_profile_picture": lambda *args, **kwargs: "https://media.licdn.com/dms/image/C4D03AQFj-xjuXrLFSQ/profile-displayphoto-shrink_800_800/0/1576881858598?e=1686787200&v=beta&t=zrQC76QwsfQQIWthfOnrKRBMZ5D-qIAvzLXLmWgYvTk", }, ) diff --git a/autogpt_platform/backend/backend/blocks/exa/helpers.py b/autogpt_platform/backend/backend/blocks/exa/helpers.py index de8c0e7443..f31f01c78a 100644 --- a/autogpt_platform/backend/backend/blocks/exa/helpers.py +++ b/autogpt_platform/backend/backend/blocks/exa/helpers.py @@ -319,7 +319,7 @@ class CostDollars(BaseModel): # Helper functions for payload processing def process_text_field( - text: Union[bool, TextEnabled, TextDisabled, TextAdvanced, None], + text: Union[bool, TextEnabled, TextDisabled, TextAdvanced, None] ) -> Optional[Union[bool, Dict[str, Any]]]: """Process text field for API payload.""" if text is None: @@ -400,7 +400,7 @@ def process_contents_settings(contents: Optional[ContentSettings]) -> Dict[str, def process_context_field( - context: Union[bool, dict, ContextEnabled, ContextDisabled, ContextAdvanced, None], + context: Union[bool, dict, ContextEnabled, ContextDisabled, ContextAdvanced, None] ) -> Optional[Union[bool, Dict[str, int]]]: """Process context field for API payload.""" if context is None: diff --git a/autogpt_platform/backend/backend/blocks/exa/websets.py b/autogpt_platform/backend/backend/blocks/exa/websets.py index da1de56b48..ce623ad410 100644 --- a/autogpt_platform/backend/backend/blocks/exa/websets.py +++ b/autogpt_platform/backend/backend/blocks/exa/websets.py @@ -566,9 +566,8 @@ class ExaUpdateWebsetBlock(Block): yield "status", status_str yield "external_id", sdk_webset.external_id yield "metadata", sdk_webset.metadata or {} - yield ( - "updated_at", - (sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else ""), + yield "updated_at", ( + sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else "" ) @@ -707,13 +706,11 @@ class ExaGetWebsetBlock(Block): yield "enrichments", enrichments_data yield "monitors", monitors_data yield "metadata", sdk_webset.metadata or {} - yield ( - "created_at", - (sdk_webset.created_at.isoformat() if sdk_webset.created_at else ""), + yield "created_at", ( + sdk_webset.created_at.isoformat() if sdk_webset.created_at else "" ) - yield ( - "updated_at", - (sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else ""), + yield "updated_at", ( + sdk_webset.updated_at.isoformat() if sdk_webset.updated_at else "" ) @@ -1267,9 +1264,7 @@ class ExaWebsetSummaryBlock(Block): ( e.format.value if e.format and hasattr(e.format, "value") - else str(e.format) - if e.format - else "text" + else str(e.format) if e.format else "text" ) for e in enrichments ) diff --git a/autogpt_platform/backend/backend/blocks/exa/websets_polling.py 
b/autogpt_platform/backend/backend/blocks/exa/websets_polling.py index d7b8c14b0a..f4168f1446 100644 --- a/autogpt_platform/backend/backend/blocks/exa/websets_polling.py +++ b/autogpt_platform/backend/backend/blocks/exa/websets_polling.py @@ -523,20 +523,16 @@ class ExaWaitForEnrichmentBlock(Block): items_enriched = 0 if input_data.sample_results and status == "completed": - ( - sample_data, - items_enriched, - ) = await self._get_sample_enrichments( - input_data.webset_id, input_data.enrichment_id, aexa + sample_data, items_enriched = ( + await self._get_sample_enrichments( + input_data.webset_id, input_data.enrichment_id, aexa + ) ) yield "enrichment_id", input_data.enrichment_id yield "final_status", status yield "items_enriched", items_enriched - yield ( - "enrichment_title", - enrichment.title or enrichment.description or "", - ) + yield "enrichment_title", enrichment.title or enrichment.description or "" yield "elapsed_time", elapsed if input_data.sample_results: yield "sample_data", sample_data diff --git a/autogpt_platform/backend/backend/blocks/flux_kontext.py b/autogpt_platform/backend/backend/blocks/flux_kontext.py index ea5d3027c2..d56baa6d92 100644 --- a/autogpt_platform/backend/backend/blocks/flux_kontext.py +++ b/autogpt_platform/backend/backend/blocks/flux_kontext.py @@ -127,9 +127,7 @@ class AIImageEditorBlock(Block): ], test_mock={ # Use data URI to avoid HTTP requests during tests - "run_model": lambda *args, **kwargs: ( - "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==" - ), + "run_model": lambda *args, **kwargs: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==", }, test_credentials=TEST_CREDENTIALS, ) diff --git a/autogpt_platform/backend/backend/blocks/github/issues.py b/autogpt_platform/backend/backend/blocks/github/issues.py index 1272bdd520..22b4149663 100644 --- a/autogpt_platform/backend/backend/blocks/github/issues.py +++ b/autogpt_platform/backend/backend/blocks/github/issues.py @@ -798,9 +798,7 @@ class GithubUnassignIssueBlock(Block): test_credentials=TEST_CREDENTIALS, test_output=[("status", "Issue unassigned successfully")], test_mock={ - "unassign_issue": lambda *args, **kwargs: ( - "Issue unassigned successfully" - ) + "unassign_issue": lambda *args, **kwargs: "Issue unassigned successfully" }, ) diff --git a/autogpt_platform/backend/backend/blocks/github/pull_requests.py b/autogpt_platform/backend/backend/blocks/github/pull_requests.py index 59882f173d..9049037716 100644 --- a/autogpt_platform/backend/backend/blocks/github/pull_requests.py +++ b/autogpt_platform/backend/backend/blocks/github/pull_requests.py @@ -261,9 +261,7 @@ class GithubReadPullRequestBlock(Block): "This is the body of the pull request.", "username", ), - "read_pr_changes": lambda *args, **kwargs: ( - "List of changes made in the pull request." 
- ), + "read_pr_changes": lambda *args, **kwargs: "List of changes made in the pull request.", }, ) @@ -367,9 +365,7 @@ class GithubAssignPRReviewerBlock(Block): test_credentials=TEST_CREDENTIALS, test_output=[("status", "Reviewer assigned successfully")], test_mock={ - "assign_reviewer": lambda *args, **kwargs: ( - "Reviewer assigned successfully" - ) + "assign_reviewer": lambda *args, **kwargs: "Reviewer assigned successfully" }, ) @@ -436,9 +432,7 @@ class GithubUnassignPRReviewerBlock(Block): test_credentials=TEST_CREDENTIALS, test_output=[("status", "Reviewer unassigned successfully")], test_mock={ - "unassign_reviewer": lambda *args, **kwargs: ( - "Reviewer unassigned successfully" - ) + "unassign_reviewer": lambda *args, **kwargs: "Reviewer unassigned successfully" }, ) diff --git a/autogpt_platform/backend/backend/blocks/google/docs.py b/autogpt_platform/backend/backend/blocks/google/docs.py index 90aeb8fca2..7840cbae73 100644 --- a/autogpt_platform/backend/backend/blocks/google/docs.py +++ b/autogpt_platform/backend/backend/blocks/google/docs.py @@ -341,17 +341,14 @@ class GoogleDocsCreateBlock(Block): ) doc_id = result["document_id"] doc_url = result["document_url"] - yield ( - "document", - GoogleDriveFile( - id=doc_id, - name=input_data.title, - mimeType="application/vnd.google-apps.document", - url=doc_url, - iconUrl="https://www.gstatic.com/images/branding/product/1x/docs_48dp.png", - isFolder=False, - _credentials_id=input_data.credentials.id, - ), + yield "document", GoogleDriveFile( + id=doc_id, + name=input_data.title, + mimeType="application/vnd.google-apps.document", + url=doc_url, + iconUrl="https://www.gstatic.com/images/branding/product/1x/docs_48dp.png", + isFolder=False, + _credentials_id=input_data.credentials.id, ) yield "document_id", doc_id yield "document_url", doc_url @@ -818,10 +815,7 @@ class GoogleDocsGetMetadataBlock(Block): yield "title", result["title"] yield "document_id", input_data.document.id yield "revision_id", result["revision_id"] - yield ( - "document_url", - f"https://docs.google.com/document/d/{input_data.document.id}/edit", - ) + yield "document_url", f"https://docs.google.com/document/d/{input_data.document.id}/edit" yield "document", _make_document_output(input_data.document) except Exception as e: yield "error", f"Failed to get metadata: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/google/gmail.py b/autogpt_platform/backend/backend/blocks/google/gmail.py index f9b34dad64..2040cabe3f 100644 --- a/autogpt_platform/backend/backend/blocks/google/gmail.py +++ b/autogpt_platform/backend/backend/blocks/google/gmail.py @@ -278,13 +278,11 @@ class GmailBase(Block, ABC): """Download attachment content when email body is stored as attachment.""" try: attachment = await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .attachments() - .get(userId="me", messageId=msg_id, id=attachment_id) - .execute() - ) + lambda: service.users() + .messages() + .attachments() + .get(userId="me", messageId=msg_id, id=attachment_id) + .execute() ) return attachment.get("data") except Exception: @@ -306,13 +304,11 @@ class GmailBase(Block, ABC): async def download_attachment(self, service, message_id: str, attachment_id: str): attachment = await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .attachments() - .get(userId="me", messageId=message_id, id=attachment_id) - .execute() - ) + lambda: service.users() + .messages() + .attachments() + .get(userId="me", messageId=message_id, id=attachment_id) + .execute() ) 
file_data = base64.urlsafe_b64decode(attachment["data"].encode("UTF-8")) return file_data @@ -470,12 +466,10 @@ class GmailReadBlock(GmailBase): else "full" ) msg = await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .get(userId="me", id=message["id"], format=format_type) - .execute() - ) + lambda: service.users() + .messages() + .get(userId="me", id=message["id"], format=format_type) + .execute() ) headers = { @@ -608,12 +602,10 @@ class GmailSendBlock(GmailBase): ) raw_message = await create_mime_message(input_data, execution_context) sent_message = await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .send(userId="me", body={"raw": raw_message}) - .execute() - ) + lambda: service.users() + .messages() + .send(userId="me", body={"raw": raw_message}) + .execute() ) return {"id": sent_message["id"], "status": "sent"} @@ -707,13 +699,8 @@ class GmailCreateDraftBlock(GmailBase): input_data, execution_context, ) - yield ( - "result", - GmailDraftResult( - id=result["id"], - message_id=result["message"]["id"], - status="draft_created", - ), + yield "result", GmailDraftResult( + id=result["id"], message_id=result["message"]["id"], status="draft_created" ) async def _create_draft( @@ -726,12 +713,10 @@ class GmailCreateDraftBlock(GmailBase): raw_message = await create_mime_message(input_data, execution_context) draft = await asyncio.to_thread( - lambda: ( - service.users() - .drafts() - .create(userId="me", body={"message": {"raw": raw_message}}) - .execute() - ) + lambda: service.users() + .drafts() + .create(userId="me", body={"message": {"raw": raw_message}}) + .execute() ) return draft @@ -855,12 +840,10 @@ class GmailAddLabelBlock(GmailBase): async def _add_label(self, service, message_id: str, label_name: str) -> dict: label_id = await self._get_or_create_label(service, label_name) result = await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .modify(userId="me", id=message_id, body={"addLabelIds": [label_id]}) - .execute() - ) + lambda: service.users() + .messages() + .modify(userId="me", id=message_id, body={"addLabelIds": [label_id]}) + .execute() ) if not result.get("labelIds"): return { @@ -874,12 +857,10 @@ class GmailAddLabelBlock(GmailBase): label_id = await self._get_label_id(service, label_name) if not label_id: label = await asyncio.to_thread( - lambda: ( - service.users() - .labels() - .create(userId="me", body={"name": label_name}) - .execute() - ) + lambda: service.users() + .labels() + .create(userId="me", body={"name": label_name}) + .execute() ) label_id = label["id"] return label_id @@ -946,14 +927,10 @@ class GmailRemoveLabelBlock(GmailBase): label_id = await self._get_label_id(service, label_name) if label_id: result = await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .modify( - userId="me", id=message_id, body={"removeLabelIds": [label_id]} - ) - .execute() - ) + lambda: service.users() + .messages() + .modify(userId="me", id=message_id, body={"removeLabelIds": [label_id]}) + .execute() ) if not result.get("labelIds"): return { @@ -1071,12 +1048,10 @@ class GmailGetThreadBlock(GmailBase): else "full" ) thread = await asyncio.to_thread( - lambda: ( - service.users() - .threads() - .get(userId="me", id=thread_id, format=format_type) - .execute() - ) + lambda: service.users() + .threads() + .get(userId="me", id=thread_id, format=format_type) + .execute() ) parsed_messages = [] @@ -1131,25 +1106,23 @@ async def _build_reply_message( """ # Get parent message for reply context parent = await 
asyncio.to_thread( - lambda: ( - service.users() - .messages() - .get( - userId="me", - id=input_data.parentMessageId, - format="metadata", - metadataHeaders=[ - "Subject", - "References", - "Message-ID", - "From", - "To", - "Cc", - "Reply-To", - ], - ) - .execute() + lambda: service.users() + .messages() + .get( + userId="me", + id=input_data.parentMessageId, + format="metadata", + metadataHeaders=[ + "Subject", + "References", + "Message-ID", + "From", + "To", + "Cc", + "Reply-To", + ], ) + .execute() ) # Build headers dictionary, preserving all values for duplicate headers @@ -1373,12 +1346,10 @@ class GmailReplyBlock(GmailBase): # Send the message return await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .send(userId="me", body={"threadId": thread_id, "raw": raw}) - .execute() - ) + lambda: service.users() + .messages() + .send(userId="me", body={"threadId": thread_id, "raw": raw}) + .execute() ) @@ -1488,20 +1459,18 @@ class GmailDraftReplyBlock(GmailBase): # Create draft with proper thread association draft = await asyncio.to_thread( - lambda: ( - service.users() - .drafts() - .create( - userId="me", - body={ - "message": { - "threadId": thread_id, - "raw": raw, - } - }, - ) - .execute() + lambda: service.users() + .drafts() + .create( + userId="me", + body={ + "message": { + "threadId": thread_id, + "raw": raw, + } + }, ) + .execute() ) return draft @@ -1673,12 +1642,10 @@ class GmailForwardBlock(GmailBase): # Get the original message original = await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .get(userId="me", id=input_data.messageId, format="full") - .execute() - ) + lambda: service.users() + .messages() + .get(userId="me", id=input_data.messageId, format="full") + .execute() ) headers = { @@ -1768,10 +1735,8 @@ To: {original_to} # Send the forwarded message raw = base64.urlsafe_b64encode(msg.as_bytes()).decode("utf-8") return await asyncio.to_thread( - lambda: ( - service.users() - .messages() - .send(userId="me", body={"raw": raw}) - .execute() - ) + lambda: service.users() + .messages() + .send(userId="me", body={"raw": raw}) + .execute() ) diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py index b764bdcf83..da541d3bf5 100644 --- a/autogpt_platform/backend/backend/blocks/google/sheets.py +++ b/autogpt_platform/backend/backend/blocks/google/sheets.py @@ -345,17 +345,14 @@ class GoogleSheetsReadBlock(Block): ) yield "result", data # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=spreadsheet_id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=spreadsheet_id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", _handle_sheets_api_error(str(e), "read") @@ -469,12 +466,9 @@ class GoogleSheetsWriteBlock(Block): if validation_error: # Customize message for write operations on CSV files if 
"CSV file" in validation_error: - yield ( - "error", - validation_error.replace( - "Please use a CSV reader block instead, or", - "CSV files are read-only through Google Drive. Please", - ), + yield "error", validation_error.replace( + "Please use a CSV reader block instead, or", + "CSV files are read-only through Google Drive. Please", ) else: yield "error", validation_error @@ -491,17 +485,14 @@ class GoogleSheetsWriteBlock(Block): ) yield "result", result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", _handle_sheets_api_error(str(e), "write") @@ -623,17 +614,14 @@ class GoogleSheetsAppendRowBlock(Block): input_data.value_input_option, ) yield "result", result - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to append row: {str(e)}" @@ -756,17 +744,14 @@ class GoogleSheetsClearBlock(Block): ) yield "result", result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to clear Google Sheet range: {str(e)}" @@ -869,17 +854,14 @@ class GoogleSheetsMetadataBlock(Block): ) yield "result", 
result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to get spreadsheet metadata: {str(e)}" @@ -1002,17 +984,14 @@ class GoogleSheetsManageSheetBlock(Block): ) yield "result", result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to manage sheet: {str(e)}" @@ -1162,17 +1141,14 @@ class GoogleSheetsBatchOperationsBlock(Block): ) yield "result", result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to perform batch operations: {str(e)}" @@ -1330,17 +1306,14 @@ class GoogleSheetsFindReplaceBlock(Block): ) yield "result", result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - 
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to find/replace in Google Sheet: {str(e)}" @@ -1515,17 +1488,14 @@ class GoogleSheetsFindBlock(Block): yield "locations", result["locations"] yield "result", {"success": True} # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to find text in Google Sheet: {str(e)}" @@ -1784,17 +1754,14 @@ class GoogleSheetsFormatBlock(Block): else: yield "result", result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to format Google Sheet cells: {str(e)}" @@ -1961,17 +1928,14 @@ class GoogleSheetsCreateSpreadsheetBlock(Block): spreadsheet_id = result["spreadsheetId"] spreadsheet_url = result["spreadsheetUrl"] # Output the GoogleDriveFile for chaining (includes credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=spreadsheet_id, - name=result.get("title", input_data.title), - mimeType="application/vnd.google-apps.spreadsheet", - url=spreadsheet_url, - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.credentials.id, # Preserve credentials for chaining - ), + yield "spreadsheet", GoogleDriveFile( + id=spreadsheet_id, + name=result.get("title", input_data.title), + 
mimeType="application/vnd.google-apps.spreadsheet", + url=spreadsheet_url, + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.credentials.id, # Preserve credentials for chaining ) yield "spreadsheet_id", spreadsheet_id yield "spreadsheet_url", spreadsheet_url @@ -2149,17 +2113,14 @@ class GoogleSheetsUpdateCellBlock(Block): yield "result", result # Output the GoogleDriveFile for chaining (preserves credentials_id) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", _handle_sheets_api_error(str(e), "update") @@ -2418,17 +2379,14 @@ class GoogleSheetsFilterRowsBlock(Block): yield "rows", result["rows"] yield "row_indices", result["row_indices"] yield "count", result["count"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to filter rows: {str(e)}" @@ -2638,17 +2596,14 @@ class GoogleSheetsLookupRowBlock(Block): yield "row_dict", result["row_dict"] yield "row_index", result["row_index"] yield "found", result["found"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to lookup row: {str(e)}" @@ -2862,17 +2817,14 @@ class 
GoogleSheetsDeleteRowsBlock(Block): ) yield "result", {"success": True} yield "deleted_count", result["deleted_count"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to delete rows: {str(e)}" @@ -3043,17 +2995,14 @@ class GoogleSheetsGetColumnBlock(Block): yield "values", result["values"] yield "count", result["count"] yield "column_index", result["column_index"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to get column: {str(e)}" @@ -3227,17 +3176,14 @@ class GoogleSheetsSortBlock(Block): input_data.has_header, ) yield "result", result - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to sort sheet: {str(e)}" @@ -3493,17 +3439,14 @@ class GoogleSheetsGetUniqueValuesBlock(Block): yield "values", result["values"] yield "counts", result["counts"] yield "total_unique", result["total_unique"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - 
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to get unique values: {str(e)}" @@ -3677,17 +3620,14 @@ class GoogleSheetsInsertRowBlock(Block): input_data.value_input_option, ) yield "result", result - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to insert row: {str(e)}" @@ -3853,17 +3793,14 @@ class GoogleSheetsAddColumnBlock(Block): yield "result", {"success": True} yield "column_letter", result["column_letter"] yield "column_index", result["column_index"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to add column: {str(e)}" @@ -4061,17 +3998,14 @@ class GoogleSheetsGetRowCountBlock(Block): yield "data_rows", result["data_rows"] yield "last_row", result["last_row"] yield "column_count", result["column_count"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + 
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to get row count: {str(e)}" @@ -4242,17 +4176,14 @@ class GoogleSheetsRemoveDuplicatesBlock(Block): yield "result", {"success": True} yield "removed_count", result["removed_count"] yield "remaining_rows", result["remaining_rows"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to remove duplicates: {str(e)}" @@ -4495,17 +4426,14 @@ class GoogleSheetsUpdateRowBlock(Block): input_data.dict_values, ) yield "result", result - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to update row: {str(e)}" @@ -4687,17 +4615,14 @@ class GoogleSheetsGetRowBlock(Block): ) yield "row", result["row"] yield "row_dict", result["row_dict"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to get row: {str(e)}" @@ -4828,17 +4753,14 @@ class GoogleSheetsDeleteColumnBlock(Block): input_data.column, ) yield "result", result - yield ( - "spreadsheet", - 
GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to delete column: {str(e)}" @@ -5009,17 +4931,14 @@ class GoogleSheetsCreateNamedRangeBlock(Block): ) yield "result", {"success": True} yield "named_range_id", result["named_range_id"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to create named range: {str(e)}" @@ -5185,17 +5104,14 @@ class GoogleSheetsListNamedRangesBlock(Block): ) yield "named_ranges", result["named_ranges"] yield "count", result["count"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to list named ranges: {str(e)}" @@ -5348,17 +5264,14 @@ class GoogleSheetsAddDropdownBlock(Block): input_data.show_dropdown, ) yield "result", result - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + 
name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to add dropdown: {str(e)}" @@ -5523,17 +5436,14 @@ class GoogleSheetsCopyToSpreadsheetBlock(Block): yield "result", {"success": True} yield "new_sheet_id", result["new_sheet_id"] yield "new_sheet_name", result["new_sheet_name"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.source_spreadsheet.id, - name=input_data.source_spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.source_spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.source_spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.source_spreadsheet.id, + name=input_data.source_spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.source_spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.source_spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to copy sheet: {str(e)}" @@ -5678,17 +5588,14 @@ class GoogleSheetsProtectRangeBlock(Block): ) yield "result", {"success": True} yield "protection_id", result["protection_id"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to protect range: {str(e)}" @@ -5845,17 +5752,14 @@ class GoogleSheetsExportCsvBlock(Block): ) yield "csv_data", result["csv_data"] yield "row_count", result["row_count"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: 
yield "error", f"Failed to export CSV: {str(e)}" @@ -5991,17 +5895,14 @@ class GoogleSheetsImportCsvBlock(Block): ) yield "result", {"success": True} yield "rows_imported", result["rows_imported"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to import CSV: {str(e)}" @@ -6131,17 +6032,14 @@ class GoogleSheetsAddNoteBlock(Block): input_data.note, ) yield "result", {"success": True} - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to add note: {str(e)}" @@ -6287,17 +6185,14 @@ class GoogleSheetsGetNotesBlock(Block): notes = result["notes"] yield "notes", notes yield "count", len(notes) - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to get notes: {str(e)}" @@ -6452,17 +6347,14 @@ class GoogleSheetsShareSpreadsheetBlock(Block): ) yield "result", {"success": True} yield "share_link", result["share_link"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - 
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to share spreadsheet: {str(e)}" @@ -6599,17 +6491,14 @@ class GoogleSheetsSetPublicAccessBlock(Block): ) yield "result", {"success": True, "is_public": result["is_public"]} yield "share_link", result["share_link"] - yield ( - "spreadsheet", - GoogleDriveFile( - id=input_data.spreadsheet.id, - name=input_data.spreadsheet.name, - mimeType="application/vnd.google-apps.spreadsheet", - url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", - iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", - isFolder=False, - _credentials_id=input_data.spreadsheet.credentials_id, - ), + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: yield "error", f"Failed to set public access: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py index 582ae7e9ee..09a384c74a 100644 --- a/autogpt_platform/backend/backend/blocks/ideogram.py +++ b/autogpt_platform/backend/backend/blocks/ideogram.py @@ -195,12 +195,8 @@ class IdeogramModelBlock(Block): ), ], test_mock={ - "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name, custom_colors: ( - "https://ideogram.ai/api/images/test-generated-image-url.png" - ), - "upscale_image": lambda api_key, image_url: ( - "https://ideogram.ai/api/images/test-upscaled-image-url.png" - ), + "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name, custom_colors: "https://ideogram.ai/api/images/test-generated-image-url.png", + "upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png", }, test_credentials=TEST_CREDENTIALS, ) diff --git a/autogpt_platform/backend/backend/blocks/io.py b/autogpt_platform/backend/backend/blocks/io.py index 0e7e5922c6..a9c3859490 100644 --- a/autogpt_platform/backend/backend/blocks/io.py +++ b/autogpt_platform/backend/backend/blocks/io.py @@ -210,11 +210,8 @@ class AgentOutputBlock(Block): if input_data.format: try: formatter = TextFormatter(autoescape=input_data.escape_html) - yield ( - "output", - formatter.format_string( - input_data.format, {input_data.name: input_data.value} - ), + yield "output", formatter.format_string( + input_data.format, {input_data.name: input_data.value} ) except Exception as e: yield "output", f"Error: {e}, {input_data.value}" @@ -477,13 +474,10 @@ class AgentFileInputBlock(AgentInputBlock): # for_block_output: smart format - workspace:// in 
CoPilot, data URI in graphs return_format = "for_external_api" if input_data.base_64 else "for_block_output" - yield ( - "result", - await store_media_file( - file=input_data.value, - execution_context=execution_context, - return_format=return_format, - ), + yield "result", await store_media_file( + file=input_data.value, + execution_context=execution_context, + return_format=return_format, ) diff --git a/autogpt_platform/backend/backend/blocks/linear/_api.py b/autogpt_platform/backend/backend/blocks/linear/_api.py index 33f7f26546..ea609d515a 100644 --- a/autogpt_platform/backend/backend/blocks/linear/_api.py +++ b/autogpt_platform/backend/backend/blocks/linear/_api.py @@ -75,6 +75,7 @@ class LinearClient: response_data = response.json() if "errors" in response_data: + error_messages = [ error.get("message", "") for error in response_data["errors"] ] diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index fa4a245fc1..7a020593d7 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -692,6 +692,7 @@ async def llm_call( reasoning=reasoning, ) elif provider == "anthropic": + an_tools = convert_openai_tool_fmt_to_anthropic(tools) system_messages = [p["content"] for p in prompt if p["role"] == "system"] diff --git a/autogpt_platform/backend/backend/blocks/persistence.py b/autogpt_platform/backend/backend/blocks/persistence.py index 15d063d79e..a327fd22c7 100644 --- a/autogpt_platform/backend/backend/blocks/persistence.py +++ b/autogpt_platform/backend/backend/blocks/persistence.py @@ -75,14 +75,11 @@ class PersistInformationBlock(Block): storage_key = get_storage_key(input_data.key, input_data.scope, graph_id) # Store the data - yield ( - "value", - await self._store_data( - user_id=user_id, - node_exec_id=node_exec_id, - key=storage_key, - data=input_data.value, - ), + yield "value", await self._store_data( + user_id=user_id, + node_exec_id=node_exec_id, + key=storage_key, + data=input_data.value, ) async def _store_data( diff --git a/autogpt_platform/backend/backend/blocks/pinecone.py b/autogpt_platform/backend/backend/blocks/pinecone.py index eb6a712543..878f6f72fb 100644 --- a/autogpt_platform/backend/backend/blocks/pinecone.py +++ b/autogpt_platform/backend/backend/blocks/pinecone.py @@ -160,13 +160,10 @@ class PineconeQueryBlock(Block): combined_text = "\n\n".join(texts) # Return both the raw matches and combined text - yield ( - "results", - { - "matches": results["matches"], - "combined_text": combined_text, - }, - ) + yield "results", { + "matches": results["matches"], + "combined_text": combined_text, + } yield "combined_results", combined_text except Exception as e: diff --git a/autogpt_platform/backend/backend/blocks/reddit.py b/autogpt_platform/backend/backend/blocks/reddit.py index 97479e2b59..1109d568db 100644 --- a/autogpt_platform/backend/backend/blocks/reddit.py +++ b/autogpt_platform/backend/backend/blocks/reddit.py @@ -309,13 +309,10 @@ class PostRedditCommentBlock(Block): async def run( self, input_data: Input, *, credentials: RedditCredentials, **kwargs ) -> BlockOutput: - yield ( - "comment_id", - self.reply_post( - credentials, - post_id=input_data.post_id, - comment=input_data.comment, - ), + yield "comment_id", self.reply_post( + credentials, + post_id=input_data.post_id, + comment=input_data.comment, ) yield "post_id", input_data.post_id diff --git a/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py 
b/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py index 11429edc32..c112ce75c4 100644 --- a/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py +++ b/autogpt_platform/backend/backend/blocks/replicate/flux_advanced.py @@ -141,9 +141,7 @@ class ReplicateFluxAdvancedModelBlock(Block): ), ], test_mock={ - "run_model": lambda api_key, model_name, prompt, seed, steps, guidance, interval, aspect_ratio, output_format, output_quality, safety_tolerance: ( - "https://replicate.com/output/generated-image-url.jpg" - ), + "run_model": lambda api_key, model_name, prompt, seed, steps, guidance, interval, aspect_ratio, output_format, output_quality, safety_tolerance: "https://replicate.com/output/generated-image-url.jpg", }, test_credentials=TEST_CREDENTIALS, ) diff --git a/autogpt_platform/backend/backend/blocks/slant3d/base.py b/autogpt_platform/backend/backend/blocks/slant3d/base.py index 9e39f7d01c..e368a1b451 100644 --- a/autogpt_platform/backend/backend/blocks/slant3d/base.py +++ b/autogpt_platform/backend/backend/blocks/slant3d/base.py @@ -48,7 +48,7 @@ class Slant3DBlockBase(Block): raise ValueError( f"""Invalid color profile combination {color_tag}. Valid colors for {profile.value} are: -{",".join([filament["colorTag"].replace(profile.value.lower(), "") for filament in response["filaments"] if filament["profile"] == profile.value])} +{','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])} """ ) return color_tag diff --git a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py index de45254b97..ff6042eaab 100644 --- a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py @@ -933,10 +933,7 @@ class SmartDecisionMakerBlock(Block): credentials, input_data, iteration_prompt, tool_functions ) except Exception as e: - yield ( - "error", - f"LLM call failed in agent mode iteration {iteration}: {str(e)}", - ) + yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}" return # Process tool calls @@ -976,10 +973,7 @@ class SmartDecisionMakerBlock(Block): if max_iterations < 0: yield "finished", f"Agent mode completed after {iteration} iterations" else: - yield ( - "finished", - f"Agent mode completed after {max_iterations} iterations (limit reached)", - ) + yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)" yield "conversations", current_prompt async def run( diff --git a/autogpt_platform/backend/backend/blocks/smartlead/campaign.py b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py index b86bc6326f..c3bf930068 100644 --- a/autogpt_platform/backend/backend/blocks/smartlead/campaign.py +++ b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py @@ -180,22 +180,20 @@ class AddLeadToCampaignBlock(Block): ), ], test_mock={ - "add_leads_to_campaign": lambda campaign_id, lead_list, credentials: ( - AddLeadsToCampaignResponse( - ok=True, - upload_count=1, - already_added_to_campaign=0, - duplicate_count=0, - invalid_email_count=0, - is_lead_limit_exhausted=False, - lead_import_stopped_count=0, - error="", - total_leads=1, - block_count=0, - invalid_emails=[], - unsubscribed_leads=[], - bounce_count=0, - ) + "add_leads_to_campaign": lambda campaign_id, lead_list, credentials: AddLeadsToCampaignResponse( + ok=True, + upload_count=1, + 
already_added_to_campaign=0, + duplicate_count=0, + invalid_email_count=0, + is_lead_limit_exhausted=False, + lead_import_stopped_count=0, + error="", + total_leads=1, + block_count=0, + invalid_emails=[], + unsubscribed_leads=[], + bounce_count=0, ) }, ) @@ -297,11 +295,9 @@ class SaveCampaignSequencesBlock(Block): ), ], test_mock={ - "save_campaign_sequences": lambda campaign_id, sequences, credentials: ( - SaveSequencesResponse( - ok=True, - message="Sequences saved successfully", - ) + "save_campaign_sequences": lambda campaign_id, sequences, credentials: SaveSequencesResponse( + ok=True, + message="Sequences saved successfully", ) }, ) diff --git a/autogpt_platform/backend/backend/blocks/stagehand/blocks.py b/autogpt_platform/backend/backend/blocks/stagehand/blocks.py index 61ed3e8a97..91c096ffe4 100644 --- a/autogpt_platform/backend/backend/blocks/stagehand/blocks.py +++ b/autogpt_platform/backend/backend/blocks/stagehand/blocks.py @@ -97,9 +97,9 @@ class StagehandRecommendedLlmModel(str, Enum): if len(model_name.split("/")) == 1 and not self.value.startswith( model_metadata.provider ): - assert model_metadata.provider != "open_router", ( - "Logic failed and open_router provider attempted to be prepended to model name! in stagehand/_config.py" - ) + assert ( + model_metadata.provider != "open_router" + ), "Logic failed and open_router provider attempted to be prepended to model name! in stagehand/_config.py" model_name = f"{model_metadata.provider}/{model_name}" logger.error(f"Model name: {model_name}") diff --git a/autogpt_platform/backend/backend/blocks/test/test_block.py b/autogpt_platform/backend/backend/blocks/test/test_block.py index 612323f622..7a1fdbcc73 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_block.py +++ b/autogpt_platform/backend/backend/blocks/test/test_block.py @@ -128,9 +128,9 @@ async def test_block_ids_valid(block: Type[Block]): try: parsed_uuid = uuid.UUID(block_instance.id) # Verify it's specifically UUID version 4 - assert parsed_uuid.version == 4, ( - f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4" - ) + assert ( + parsed_uuid.version == 4 + ), f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4" except ValueError: pytest.fail(f"Block {block.name} has invalid UUID format: {block_instance.id}") diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py index 68ac940e79..0f9da7e10b 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py @@ -174,9 +174,9 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer): ) assert tool_functions is not None, "Tool functions should not be None" - assert len(tool_functions) == 2, ( - f"Expected 2 tool functions, got {len(tool_functions)}" - ) + assert ( + len(tool_functions) == 2 + ), f"Expected 2 tool functions, got {len(tool_functions)}" # Check the first tool function (testgraph) assert tool_functions[0]["type"] == "function" @@ -219,19 +219,17 @@ async def test_smart_decision_maker_tracks_llm_stats(): # Mock the _create_tool_node_signatures method to avoid database calls - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response, - ), - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=[], - 
), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response, + ), patch.object( + SmartDecisionMakerBlock, + "_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=[], ): + # Create test input input_data = SmartDecisionMakerBlock.Input( prompt="Should I continue with this task?", @@ -324,19 +322,17 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_with_typo.reasoning = None mock_response_with_typo.raw_response = {"role": "assistant", "content": None} - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response_with_typo, - ) as mock_llm_call, - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=mock_tool_functions, - ), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response_with_typo, + ) as mock_llm_call, patch.object( + SmartDecisionMakerBlock, + "_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=mock_tool_functions, ): + input_data = SmartDecisionMakerBlock.Input( prompt="Search for keywords", model=llm_module.DEFAULT_LLM_MODEL, @@ -393,19 +389,17 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_missing_required.reasoning = None mock_response_missing_required.raw_response = {"role": "assistant", "content": None} - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response_missing_required, - ), - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=mock_tool_functions, - ), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response_missing_required, + ), patch.object( + SmartDecisionMakerBlock, + "_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=mock_tool_functions, ): + input_data = SmartDecisionMakerBlock.Input( prompt="Search for keywords", model=llm_module.DEFAULT_LLM_MODEL, @@ -455,19 +449,17 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_valid.reasoning = None mock_response_valid.raw_response = {"role": "assistant", "content": None} - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response_valid, - ), - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=mock_tool_functions, - ), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response_valid, + ), patch.object( + SmartDecisionMakerBlock, + "_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=mock_tool_functions, ): + input_data = SmartDecisionMakerBlock.Input( prompt="Search for keywords", model=llm_module.DEFAULT_LLM_MODEL, @@ -521,19 +513,17 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_all_params.reasoning = None mock_response_all_params.raw_response = {"role": "assistant", "content": None} - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response_all_params, - ), - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=mock_tool_functions, - ), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response_all_params, + ), patch.object( + SmartDecisionMakerBlock, + 
"_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=mock_tool_functions, ): + input_data = SmartDecisionMakerBlock.Input( prompt="Search for keywords", model=llm_module.DEFAULT_LLM_MODEL, @@ -644,14 +634,13 @@ async def test_smart_decision_maker_raw_response_conversion(): # Mock llm_call to return different responses on different calls - with ( - patch("backend.blocks.llm.llm_call", new_callable=AsyncMock) as mock_llm_call, - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=mock_tool_functions, - ), + with patch( + "backend.blocks.llm.llm_call", new_callable=AsyncMock + ) as mock_llm_call, patch.object( + SmartDecisionMakerBlock, + "_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=mock_tool_functions, ): # First call returns response that will trigger retry due to validation error # Second call returns successful response @@ -721,18 +710,15 @@ async def test_smart_decision_maker_raw_response_conversion(): "I'll help you with that." # Ollama returns string ) - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response_ollama, - ), - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=[], # No tools for this test - ), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response_ollama, + ), patch.object( + SmartDecisionMakerBlock, + "_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=[], # No tools for this test ): input_data = SmartDecisionMakerBlock.Input( prompt="Simple prompt", @@ -780,18 +766,15 @@ async def test_smart_decision_maker_raw_response_conversion(): "content": "Test response", } # Dict format - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response_dict, - ), - patch.object( - SmartDecisionMakerBlock, - "_create_tool_node_signatures", - new_callable=AsyncMock, - return_value=[], - ), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response_dict, + ), patch.object( + SmartDecisionMakerBlock, + "_create_tool_node_signatures", + new_callable=AsyncMock, + return_value=[], ): input_data = SmartDecisionMakerBlock.Input( prompt="Another test", @@ -907,21 +890,18 @@ async def test_smart_decision_maker_agent_mode(): # No longer need mock_execute_node since we use execution_processor.on_node_execution - with ( - patch("backend.blocks.llm.llm_call", llm_call_mock), - patch.object( - block, "_create_tool_node_signatures", return_value=mock_tool_signatures - ), - patch( - "backend.blocks.smart_decision_maker.get_database_manager_async_client", - return_value=mock_db_client, - ), - patch( - "backend.executor.manager.async_update_node_execution_status", - new_callable=AsyncMock, - ), - patch("backend.integrations.creds_manager.IntegrationCredentialsManager"), + with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object( + block, "_create_tool_node_signatures", return_value=mock_tool_signatures + ), patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client", + return_value=mock_db_client, + ), patch( + "backend.executor.manager.async_update_node_execution_status", + new_callable=AsyncMock, + ), patch( + "backend.integrations.creds_manager.IntegrationCredentialsManager" ): + # Create a mock execution context mock_execution_context = ExecutionContext( @@ -1029,16 +1009,14 @@ async def 
test_smart_decision_maker_traditional_mode_default(): } ] - with ( - patch( - "backend.blocks.llm.llm_call", - new_callable=AsyncMock, - return_value=mock_response, - ), - patch.object( - block, "_create_tool_node_signatures", return_value=mock_tool_signatures - ), + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response, + ), patch.object( + block, "_create_tool_node_signatures", return_value=mock_tool_signatures ): + # Test default behavior (traditional mode) input_data = SmartDecisionMakerBlock.Input( prompt="Test prompt", diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dict.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dict.py index 8039376c51..2087c0b7d6 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dict.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dict.py @@ -41,8 +41,7 @@ async def test_smart_decision_maker_handles_dynamic_dict_fields(): # Generate function signature signature = await SmartDecisionMakerBlock._create_block_function_signature( - mock_node, - mock_links, # type: ignore + mock_node, mock_links # type: ignore ) # Verify the signature was created successfully @@ -99,8 +98,7 @@ async def test_smart_decision_maker_handles_dynamic_list_fields(): # Generate function signature signature = await SmartDecisionMakerBlock._create_block_function_signature( - mock_node, - mock_links, # type: ignore + mock_node, mock_links # type: ignore ) # Verify dynamic list fields are handled properly diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py index 102e4e65d8..0427b13466 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py @@ -314,14 +314,11 @@ async def test_output_yielding_with_dynamic_fields(): mock_llm.return_value = mock_response # Mock the database manager to avoid HTTP calls during tool execution - with ( - patch( - "backend.blocks.smart_decision_maker.get_database_manager_async_client" - ) as mock_db_manager, - patch.object( - block, "_create_tool_node_signatures", new_callable=AsyncMock - ) as mock_sig, - ): + with patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client" + ) as mock_db_manager, patch.object( + block, "_create_tool_node_signatures", new_callable=AsyncMock + ) as mock_sig: # Set up the mock database manager mock_db_client = AsyncMock() mock_db_manager.return_value = mock_db_client @@ -670,6 +667,6 @@ async def test_validation_errors_dont_pollute_conversation(): if msg.get("role") == "user" and "parameter errors" in msg.get("content", "") ] - assert len(error_messages) == 0, ( - "Validation error leaked into final conversation" - ) + assert ( + len(error_messages) == 0 + ), "Validation error leaked into final conversation" diff --git a/autogpt_platform/backend/backend/blocks/time_blocks.py b/autogpt_platform/backend/backend/blocks/time_blocks.py index 59c2f17953..3a1f4c678e 100644 --- a/autogpt_platform/backend/backend/blocks/time_blocks.py +++ b/autogpt_platform/backend/backend/blocks/time_blocks.py @@ -275,30 +275,24 @@ class GetCurrentDateBlock(Block): test_output=[ ( "date", - lambda t: ( - abs( - datetime.now().date() - - datetime.strptime(t, "%Y-%m-%d").date() - ) - <= 
timedelta(days=8) - ), # 7 days difference + 1 day error margin. + lambda t: abs( + datetime.now().date() - datetime.strptime(t, "%Y-%m-%d").date() + ) + <= timedelta(days=8), # 7 days difference + 1 day error margin. ), ( "date", - lambda t: ( - abs( - datetime.now().date() - - datetime.strptime(t, "%m/%d/%Y").date() - ) - <= timedelta(days=8) - ), + lambda t: abs( + datetime.now().date() - datetime.strptime(t, "%m/%d/%Y").date() + ) + <= timedelta(days=8), # 7 days difference + 1 day error margin. ), ( "date", - lambda t: ( - len(t) == 10 and t[4] == "-" and t[7] == "-" - ), # ISO date format YYYY-MM-DD + lambda t: len(t) == 10 + and t[4] == "-" + and t[7] == "-", # ISO date format YYYY-MM-DD ), ], ) @@ -386,32 +380,25 @@ class GetCurrentDateAndTimeBlock(Block): test_output=[ ( "date_time", - lambda t: ( - abs( - datetime.now(tz=ZoneInfo("UTC")) - - datetime.strptime(t + "+00:00", "%Y-%m-%d %H:%M:%S%z") - ) - < timedelta(seconds=10) - ), # 10 seconds error margin. + lambda t: abs( + datetime.now(tz=ZoneInfo("UTC")) + - datetime.strptime(t + "+00:00", "%Y-%m-%d %H:%M:%S%z") + ) + < timedelta(seconds=10), # 10 seconds error margin. ), ( "date_time", - lambda t: ( - abs( - datetime.now().date() - - datetime.strptime(t, "%Y/%m/%d").date() - ) - <= timedelta(days=1) - ), # Date format only, no time component + lambda t: abs( + datetime.now().date() - datetime.strptime(t, "%Y/%m/%d").date() + ) + <= timedelta(days=1), # Date format only, no time component ), ( "date_time", - lambda t: ( - abs( - datetime.now(tz=ZoneInfo("UTC")) - datetime.fromisoformat(t) - ) - < timedelta(seconds=10) - ), # 10 seconds error margin for ISO format. + lambda t: abs( + datetime.now(tz=ZoneInfo("UTC")) - datetime.fromisoformat(t) + ) + < timedelta(seconds=10), # 10 seconds error margin for ISO format. 
), ], ) diff --git a/autogpt_platform/backend/backend/blocks/todoist/projects.py b/autogpt_platform/backend/backend/blocks/todoist/projects.py index c9e96e6ee6..c6d345c116 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/projects.py +++ b/autogpt_platform/backend/backend/blocks/todoist/projects.py @@ -160,7 +160,7 @@ class TodoistCreateProjectBlock(Block): test_input={"credentials": TEST_CREDENTIALS_INPUT, "name": "Test Project"}, test_credentials=TEST_CREDENTIALS, test_output=[("success", True)], - test_mock={"create_project": lambda *args, **kwargs: True}, + test_mock={"create_project": lambda *args, **kwargs: (True)}, ) @staticmethod @@ -346,7 +346,7 @@ class TodoistUpdateProjectBlock(Block): }, test_credentials=TEST_CREDENTIALS, test_output=[("success", True)], - test_mock={"update_project": lambda *args, **kwargs: True}, + test_mock={"update_project": lambda *args, **kwargs: (True)}, ) @staticmethod @@ -426,7 +426,7 @@ class TodoistDeleteProjectBlock(Block): }, test_credentials=TEST_CREDENTIALS, test_output=[("success", True)], - test_mock={"delete_project": lambda *args, **kwargs: True}, + test_mock={"delete_project": lambda *args, **kwargs: (True)}, ) @staticmethod diff --git a/autogpt_platform/backend/backend/blocks/todoist/sections.py b/autogpt_platform/backend/backend/blocks/todoist/sections.py index 7024d6243c..52dceb70b9 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/sections.py +++ b/autogpt_platform/backend/backend/blocks/todoist/sections.py @@ -285,7 +285,7 @@ class TodoistDeleteSectionBlock(Block): test_input={"credentials": TEST_CREDENTIALS_INPUT, "section_id": "7025"}, test_credentials=TEST_CREDENTIALS, test_output=[("success", True)], - test_mock={"delete_section": lambda *args, **kwargs: True}, + test_mock={"delete_section": lambda *args, **kwargs: (True)}, ) @staticmethod diff --git a/autogpt_platform/backend/backend/blocks/todoist/tasks.py b/autogpt_platform/backend/backend/blocks/todoist/tasks.py index 450766905a..183a3340b3 100644 --- a/autogpt_platform/backend/backend/blocks/todoist/tasks.py +++ b/autogpt_platform/backend/backend/blocks/todoist/tasks.py @@ -580,7 +580,7 @@ class TodoistReopenTaskBlock(Block): test_output=[ ("success", True), ], - test_mock={"reopen_task": lambda *args, **kwargs: True}, + test_mock={"reopen_task": lambda *args, **kwargs: (True)}, ) @staticmethod @@ -632,7 +632,7 @@ class TodoistDeleteTaskBlock(Block): test_output=[ ("success", True), ], - test_mock={"delete_task": lambda *args, **kwargs: True}, + test_mock={"delete_task": lambda *args, **kwargs: (True)}, ) @staticmethod diff --git a/autogpt_platform/backend/backend/blocks/twitter/_types.py b/autogpt_platform/backend/backend/blocks/twitter/_types.py index 50c452fdd3..88050ed545 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/_types.py +++ b/autogpt_platform/backend/backend/blocks/twitter/_types.py @@ -256,6 +256,7 @@ class ListFieldsFilter(BaseModel): # --------- [Input Types] ------------- class TweetExpansionInputs(BlockSchemaInput): + expansions: ExpansionFilter | None = SchemaField( description="Choose what extra information you want to get with your tweets. 
For example:\n- Select 'Media_Keys' to get media details\n- Select 'Author_User_ID' to get user information\n- Select 'Place_ID' to get location details", placeholder="Pick the extra information you want to see", diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py b/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py index f9d85f6d37..4092fbaa93 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py @@ -232,7 +232,7 @@ class TwitterCreateListBlock(Block): ("list_id", "1234567890"), ("url", "https://twitter.com/i/lists/1234567890"), ], - test_mock={"create_list": lambda *args, **kwargs: "1234567890"}, + test_mock={"create_list": lambda *args, **kwargs: ("1234567890")}, ) @staticmethod diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py index e4230aca81..540aa1395f 100644 --- a/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py @@ -159,6 +159,7 @@ class TwitterGetTweetBlock(Block): **kwargs, ) -> BlockOutput: try: + tweet_data, included, meta, user_id, user_name = self.get_tweet( credentials, input_data.tweet_id, diff --git a/autogpt_platform/backend/backend/blocks/video/narration.py b/autogpt_platform/backend/backend/blocks/video/narration.py index 67a49208e8..adf41753c8 100644 --- a/autogpt_platform/backend/backend/blocks/video/narration.py +++ b/autogpt_platform/backend/backend/blocks/video/narration.py @@ -44,8 +44,7 @@ class VideoNarrationBlock(Block): ) script: str = SchemaField(description="Narration script text") voice_id: str = SchemaField( - description="ElevenLabs voice ID", - default="21m00Tcm4TlvDq8ikWAM", # Rachel + description="ElevenLabs voice ID", default="21m00Tcm4TlvDq8ikWAM" # Rachel ) model_id: Literal[ "eleven_multilingual_v2", diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py index 076ee1c6e1..6d81a86b4c 100644 --- a/autogpt_platform/backend/backend/blocks/youtube.py +++ b/autogpt_platform/backend/backend/blocks/youtube.py @@ -94,9 +94,7 @@ class TranscribeYoutubeVideoBlock(Block): {"text": "Never gonna give you up"}, {"text": "Never gonna let you down"}, ], - "format_transcript": lambda transcript: ( - "Never gonna give you up\nNever gonna let you down" - ), + "format_transcript": lambda transcript: "Never gonna give you up\nNever gonna let you down", }, ) diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py index 8fc1f29a3d..fa5283f324 100644 --- a/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py +++ b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py @@ -140,22 +140,20 @@ class ValidateEmailsBlock(Block): ) ], test_mock={ - "validate_email": lambda email, ip_address, credentials: ( - ZBValidateResponse( - data={ - "address": email, - "status": ZBValidateStatus.valid, - "sub_status": ZBValidateSubStatus.allowed, - "account": "test", - "domain": "test.com", - "did_you_mean": None, - "domain_age_days": None, - "free_email": False, - "mx_found": False, - "mx_record": None, - "smtp_provider": None, - } - ) + "validate_email": lambda email, ip_address, credentials: ZBValidateResponse( + data={ + "address": email, + 
"status": ZBValidateStatus.valid, + "sub_status": ZBValidateSubStatus.allowed, + "account": "test", + "domain": "test.com", + "did_you_mean": None, + "domain_age_days": None, + "free_email": False, + "mx_found": False, + "mx_record": None, + "smtp_provider": None, + } ) }, ) diff --git a/autogpt_platform/backend/backend/check_db.py b/autogpt_platform/backend/backend/check_db.py index 65f82626fd..7e1c3ee14f 100644 --- a/autogpt_platform/backend/backend/check_db.py +++ b/autogpt_platform/backend/backend/check_db.py @@ -172,7 +172,7 @@ async def add_test_data(db): "storeListingId": listing.id, "agentGraphId": graph.id, "agentGraphVersion": graph.version, - "name": f"Test Agent {i + 1}", + "name": f"Test Agent {i+1}", "subHeading": faker.catch_phrase(), "description": faker.paragraph(nb_sentences=5), "imageUrls": [faker.image_url()], @@ -245,7 +245,9 @@ async def compare_counts(before, after): print("🔍 Agent run changes:") before_runs = before["agent_runs"].get("total_runs") or 0 after_runs = after["agent_runs"].get("total_runs") or 0 - print(f" Total runs: {before_runs} → {after_runs} (+{after_runs - before_runs})") + print( + f" Total runs: {before_runs} → {after_runs} " f"(+{after_runs - before_runs})" + ) # Compare reviews print("\n🔍 Review changes:") diff --git a/autogpt_platform/backend/backend/cli/oauth_tool.py b/autogpt_platform/backend/backend/cli/oauth_tool.py index 9781f78673..57982d359b 100755 --- a/autogpt_platform/backend/backend/cli/oauth_tool.py +++ b/autogpt_platform/backend/backend/cli/oauth_tool.py @@ -147,7 +147,7 @@ def format_sql_insert(creds: dict) -> str: sql = f""" -- ============================================================ --- OAuth Application: {creds["name"]} +-- OAuth Application: {creds['name']} -- Generated: {now_iso} UTC -- ============================================================ @@ -167,14 +167,14 @@ INSERT INTO "OAuthApplication" ( "isActive" ) VALUES ( - '{creds["id"]}', + '{creds['id']}', NOW(), NOW(), - '{creds["name"]}', - {f"'{creds['description']}'" if creds["description"] else "NULL"}, - '{creds["client_id"]}', - '{creds["client_secret_hash"]}', - '{creds["client_secret_salt"]}', + '{creds['name']}', + {f"'{creds['description']}'" if creds['description'] else 'NULL'}, + '{creds['client_id']}', + '{creds['client_secret_hash']}', + '{creds['client_secret_salt']}', ARRAY{redirect_uris_pg}::TEXT[], ARRAY{grant_types_pg}::TEXT[], ARRAY{scopes_pg}::"APIKeyPermission"[], @@ -186,8 +186,8 @@ VALUES ( -- ⚠️ IMPORTANT: Save these credentials securely! -- ============================================================ -- --- Client ID: {creds["client_id"]} --- Client Secret: {creds["client_secret_plaintext"]} +-- Client ID: {creds['client_id']} +-- Client Secret: {creds['client_secret_plaintext']} -- -- ⚠️ The client secret is shown ONLY ONCE! -- ⚠️ Store it securely and share only with the application developer. 
@@ -200,7 +200,7 @@ VALUES ( -- To verify the application was created: -- SELECT "clientId", name, scopes, "redirectUris", "isActive" -- FROM "OAuthApplication" --- WHERE "clientId" = '{creds["client_id"]}'; +-- WHERE "clientId" = '{creds['client_id']}'; """ return sql diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index 9257fd66fa..f3c5365446 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -431,7 +431,7 @@ class UserCreditBase(ABC): current_balance, _ = await self._get_credits(user_id) if current_balance >= ceiling_balance: raise ValueError( - f"You already have enough balance of ${current_balance / 100}, top-up is not required when you already have at least ${ceiling_balance / 100}" + f"You already have enough balance of ${current_balance/100}, top-up is not required when you already have at least ${ceiling_balance/100}" ) # Single unified atomic operation for all transaction types using UserBalance @@ -570,7 +570,7 @@ class UserCreditBase(ABC): if amount < 0 and fail_insufficient_credits: current_balance, _ = await self._get_credits(user_id) raise InsufficientBalanceError( - message=f"Insufficient balance of ${current_balance / 100}, where this will cost ${abs(amount) / 100}", + message=f"Insufficient balance of ${current_balance/100}, where this will cost ${abs(amount)/100}", user_id=user_id, balance=current_balance, amount=amount, @@ -581,6 +581,7 @@ class UserCreditBase(ABC): class UserCredit(UserCreditBase): + async def _send_refund_notification( self, notification_request: RefundRequestData, @@ -732,7 +733,7 @@ class UserCredit(UserCreditBase): ) if request.amount <= 0 or request.amount > transaction.amount: raise AssertionError( - f"Invalid amount to deduct ${request.amount / 100} from ${transaction.amount / 100} top-up" + f"Invalid amount to deduct ${request.amount/100} from ${transaction.amount/100} top-up" ) balance, _ = await self._add_transaction( @@ -786,12 +787,12 @@ class UserCredit(UserCreditBase): # If the user has enough balance, just let them win the dispute. if balance - amount >= settings.config.refund_credit_tolerance_threshold: - logger.warning(f"Accepting dispute from {user_id} for ${amount / 100}") + logger.warning(f"Accepting dispute from {user_id} for ${amount/100}") dispute.close() return logger.warning( - f"Adding extra info for dispute from {user_id} for ${amount / 100}" + f"Adding extra info for dispute from {user_id} for ${amount/100}" ) # Retrieve recent transaction history to support our evidence. # This provides a concise timeline that shows service usage and proper credit application. 
diff --git a/autogpt_platform/backend/backend/data/credit_ceiling_test.py b/autogpt_platform/backend/backend/data/credit_ceiling_test.py index 42375214cd..de048a3b72 100644 --- a/autogpt_platform/backend/backend/data/credit_ceiling_test.py +++ b/autogpt_platform/backend/backend/data/credit_ceiling_test.py @@ -107,15 +107,15 @@ async def test_ceiling_balance_clamps_when_would_exceed(server: SpinTestServer): ) # Balance should be clamped to ceiling - assert final_balance == 1000, ( - f"Balance should be clamped to 1000, got {final_balance}" - ) + assert ( + final_balance == 1000 + ), f"Balance should be clamped to 1000, got {final_balance}" # Verify with get_credits too stored_balance = await credit_system.get_credits(user_id) - assert stored_balance == 1000, ( - f"Stored balance should be 1000, got {stored_balance}" - ) + assert ( + stored_balance == 1000 + ), f"Stored balance should be 1000, got {stored_balance}" # Verify transaction shows the clamped amount transactions = await CreditTransaction.prisma().find_many( @@ -164,9 +164,9 @@ async def test_ceiling_balance_allows_when_under_threshold(server: SpinTestServe # Verify with get_credits too stored_balance = await credit_system.get_credits(user_id) - assert stored_balance == 500, ( - f"Stored balance should be 500, got {stored_balance}" - ) + assert ( + stored_balance == 500 + ), f"Stored balance should be 500, got {stored_balance}" finally: await cleanup_test_user(user_id) diff --git a/autogpt_platform/backend/backend/data/credit_concurrency_test.py b/autogpt_platform/backend/backend/data/credit_concurrency_test.py index 201f0d9c09..90f9487e21 100644 --- a/autogpt_platform/backend/backend/data/credit_concurrency_test.py +++ b/autogpt_platform/backend/backend/data/credit_concurrency_test.py @@ -108,9 +108,9 @@ async def test_concurrent_spends_same_user(server: SpinTestServer): transactions = await CreditTransaction.prisma().find_many( where={"userId": user_id, "type": prisma.enums.CreditTransactionType.USAGE} ) - assert len(transactions) == 10, ( - f"Expected 10 transactions, got {len(transactions)}" - ) + assert ( + len(transactions) == 10 + ), f"Expected 10 transactions, got {len(transactions)}" finally: await cleanup_test_user(user_id) @@ -321,9 +321,9 @@ async def test_onboarding_reward_idempotency(server: SpinTestServer): "transactionKey": f"REWARD-{user_id}-WELCOME", } ) - assert len(transactions) == 1, ( - f"Expected 1 reward transaction, got {len(transactions)}" - ) + assert ( + len(transactions) == 1 + ), f"Expected 1 reward transaction, got {len(transactions)}" finally: await cleanup_test_user(user_id) @@ -358,9 +358,9 @@ async def test_integer_overflow_protection(server: SpinTestServer): # Balance should be clamped to max_int, not overflowed final_balance = await credit_system.get_credits(user_id) - assert final_balance == max_int, ( - f"Balance should be clamped to {max_int}, got {final_balance}" - ) + assert ( + final_balance == max_int + ), f"Balance should be clamped to {max_int}, got {final_balance}" # Verify transaction was created with clamped amount transactions = await CreditTransaction.prisma().find_many( @@ -371,9 +371,9 @@ async def test_integer_overflow_protection(server: SpinTestServer): order={"createdAt": "desc"}, ) assert len(transactions) > 0, "Transaction should be created" - assert transactions[0].runningBalance == max_int, ( - "Transaction should show clamped balance" - ) + assert ( + transactions[0].runningBalance == max_int + ), "Transaction should show clamped balance" finally: await 
cleanup_test_user(user_id) @@ -432,9 +432,9 @@ async def test_high_concurrency_stress(server: SpinTestServer): # Verify final balance final_balance = await credit_system.get_credits(user_id) - assert final_balance == expected_balance, ( - f"Expected {expected_balance}, got {final_balance}" - ) + assert ( + final_balance == expected_balance + ), f"Expected {expected_balance}, got {final_balance}" assert final_balance >= 0, "Balance went negative!" finally: @@ -507,7 +507,7 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer sorted_timings = sorted(timings.items(), key=lambda x: x[1]["start"]) print("\nExecution order by start time:") for i, (label, timing) in enumerate(sorted_timings): - print(f" {i + 1}. {label}: {timing['start']:.4f} -> {timing['end']:.4f}") + print(f" {i+1}. {label}: {timing['start']:.4f} -> {timing['end']:.4f}") # Check for overlap (true concurrency) vs serialization overlaps = [] @@ -533,9 +533,9 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer print(f"Successful: {len(successful)}, Failed: {len(failed)}") # All should succeed since 150 - (10 + 20 + 30) = 90 > 0 - assert len(successful) == 3, ( - f"Expected all 3 to succeed, got {len(successful)} successes: {results}" - ) + assert ( + len(successful) == 3 + ), f"Expected all 3 to succeed, got {len(successful)} successes: {results}" assert final_balance == 90, f"Expected balance 90, got {final_balance}" # Check transaction timestamps to confirm database-level serialization @@ -546,7 +546,7 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer print("\nDatabase transaction order (by createdAt):") for i, tx in enumerate(transactions): print( - f" {i + 1}. Amount {tx.amount}, Running balance: {tx.runningBalance}, Created: {tx.createdAt}" + f" {i+1}. 
Amount {tx.amount}, Running balance: {tx.runningBalance}, Created: {tx.createdAt}" ) # Verify running balances are chronologically consistent (ordered by createdAt) @@ -575,38 +575,38 @@ async def test_concurrent_multiple_spends_sufficient_balance(server: SpinTestSer # Verify all balances are valid intermediate states for balance in actual_balances: - assert balance in expected_possible_balances, ( - f"Invalid balance {balance}, expected one of {expected_possible_balances}" - ) + assert ( + balance in expected_possible_balances + ), f"Invalid balance {balance}, expected one of {expected_possible_balances}" # Final balance should always be 90 (150 - 60) - assert min(actual_balances) == 90, ( - f"Final balance should be 90, got {min(actual_balances)}" - ) + assert ( + min(actual_balances) == 90 + ), f"Final balance should be 90, got {min(actual_balances)}" # The final transaction should always have balance 90 # The other transactions should have valid intermediate balances - assert 90 in actual_balances, ( - f"Final balance 90 should be in actual_balances: {actual_balances}" - ) + assert ( + 90 in actual_balances + ), f"Final balance 90 should be in actual_balances: {actual_balances}" # All balances should be >= 90 (the final state) - assert all(balance >= 90 for balance in actual_balances), ( - f"All balances should be >= 90, got {actual_balances}" - ) + assert all( + balance >= 90 for balance in actual_balances + ), f"All balances should be >= 90, got {actual_balances}" # CRITICAL: Transactions are atomic but can complete in any order # What matters is that all running balances are valid intermediate states # Each balance should be between 90 (final) and 140 (after first transaction) for balance in actual_balances: - assert 90 <= balance <= 140, ( - f"Balance {balance} is outside valid range [90, 140]" - ) + assert ( + 90 <= balance <= 140 + ), f"Balance {balance} is outside valid range [90, 140]" # Final balance (minimum) should always be 90 - assert min(actual_balances) == 90, ( - f"Final balance should be 90, got {min(actual_balances)}" - ) + assert ( + min(actual_balances) == 90 + ), f"Final balance should be 90, got {min(actual_balances)}" finally: await cleanup_test_user(user_id) @@ -707,7 +707,7 @@ async def test_prove_database_locking_behavior(server: SpinTestServer): for i, result in enumerate(sorted_results): print( - f" {i + 1}. {result['label']}: DB operation took {result['db_duration']:.4f}s" + f" {i+1}. 
{result['label']}: DB operation took {result['db_duration']:.4f}s" ) # Check if any operations overlapped at the database level @@ -722,9 +722,9 @@ async def test_prove_database_locking_behavior(server: SpinTestServer): print(f"\n💰 Final balance: {final_balance}") if len(successful) == 3: - assert final_balance == 0, ( - f"If all succeeded, balance should be 0, got {final_balance}" - ) + assert ( + final_balance == 0 + ), f"If all succeeded, balance should be 0, got {final_balance}" print( "✅ CONCLUSION: Database row locking causes requests to WAIT and execute serially" ) diff --git a/autogpt_platform/backend/backend/data/credit_refund_test.py b/autogpt_platform/backend/backend/data/credit_refund_test.py index c69af8e790..c2f8b29714 100644 --- a/autogpt_platform/backend/backend/data/credit_refund_test.py +++ b/autogpt_platform/backend/backend/data/credit_refund_test.py @@ -109,9 +109,9 @@ async def test_deduct_credits_atomic(server: SpinTestServer): where={"userId": REFUND_TEST_USER_ID} ) assert user_balance is not None - assert user_balance.balance == 500, ( - f"Expected balance 500, got {user_balance.balance}" - ) + assert ( + user_balance.balance == 500 + ), f"Expected balance 500, got {user_balance.balance}" # Verify refund transaction was created refund_tx = await CreditTransaction.prisma().find_first( @@ -205,9 +205,9 @@ async def test_handle_dispute_with_sufficient_balance( where={"userId": REFUND_TEST_USER_ID} ) assert user_balance is not None - assert user_balance.balance == 1000, ( - f"Balance should remain 1000, got {user_balance.balance}" - ) + assert ( + user_balance.balance == 1000 + ), f"Balance should remain 1000, got {user_balance.balance}" finally: await cleanup_test_user() @@ -332,9 +332,9 @@ async def test_concurrent_refunds(server: SpinTestServer): print(f"DEBUG: Final balance = {user_balance.balance}, expected = 500") # With atomic implementation, all 5 refunds should process correctly - assert user_balance.balance == 500, ( - f"Expected balance 500 after 5 refunds of 100 each, got {user_balance.balance}" - ) + assert ( + user_balance.balance == 500 + ), f"Expected balance 500 after 5 refunds of 100 each, got {user_balance.balance}" # Verify all refund transactions exist refund_txs = await CreditTransaction.prisma().find_many( @@ -343,9 +343,9 @@ async def test_concurrent_refunds(server: SpinTestServer): "type": CreditTransactionType.REFUND, } ) - assert len(refund_txs) == 5, ( - f"Expected 5 refund transactions, got {len(refund_txs)}" - ) + assert ( + len(refund_txs) == 5 + ), f"Expected 5 refund transactions, got {len(refund_txs)}" running_balances: set[int] = { tx.runningBalance for tx in refund_txs if tx.runningBalance is not None @@ -353,20 +353,20 @@ async def test_concurrent_refunds(server: SpinTestServer): # Verify all balances are valid intermediate states for balance in running_balances: - assert 500 <= balance <= 1000, ( - f"Invalid balance {balance}, should be between 500 and 1000" - ) + assert ( + 500 <= balance <= 1000 + ), f"Invalid balance {balance}, should be between 500 and 1000" # Final balance should be present - assert 500 in running_balances, ( - f"Final balance 500 should be in {running_balances}" - ) + assert ( + 500 in running_balances + ), f"Final balance 500 should be in {running_balances}" # All balances should be unique and form a valid sequence sorted_balances = sorted(running_balances, reverse=True) - assert len(sorted_balances) == 5, ( - f"Expected 5 unique balances, got {len(sorted_balances)}" - ) + assert ( + len(sorted_balances) == 5 
+ ), f"Expected 5 unique balances, got {len(sorted_balances)}" finally: await cleanup_test_user() diff --git a/autogpt_platform/backend/backend/data/credit_underflow_test.py b/autogpt_platform/backend/backend/data/credit_underflow_test.py index 27382b1a59..25e9f0341d 100644 --- a/autogpt_platform/backend/backend/data/credit_underflow_test.py +++ b/autogpt_platform/backend/backend/data/credit_underflow_test.py @@ -82,7 +82,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer): # Test 2: Apply amount that should cause underflow print("\n=== Test 2: Testing underflow protection ===") - test_amount = -200 # This should cause underflow: (POSTGRES_INT_MIN + 100) + (-200) = POSTGRES_INT_MIN - 100 + test_amount = ( + -200 + ) # This should cause underflow: (POSTGRES_INT_MIN + 100) + (-200) = POSTGRES_INT_MIN - 100 expected_without_protection = current_balance + test_amount print(f"Current balance: {current_balance}") print(f"Test amount: {test_amount}") @@ -99,9 +101,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer): print(f"Actual result: {balance_result}") # Check if underflow protection worked - assert balance_result == POSTGRES_INT_MIN, ( - f"Expected underflow protection to clamp balance to {POSTGRES_INT_MIN}, got {balance_result}" - ) + assert ( + balance_result == POSTGRES_INT_MIN + ), f"Expected underflow protection to clamp balance to {POSTGRES_INT_MIN}, got {balance_result}" # Test 3: Edge case - exactly at POSTGRES_INT_MIN print("\n=== Test 3: Testing exact POSTGRES_INT_MIN boundary ===") @@ -126,9 +128,9 @@ async def test_debug_underflow_step_by_step(server: SpinTestServer): ) print(f"After subtracting 1: {edge_result}") - assert edge_result == POSTGRES_INT_MIN, ( - f"Expected balance to remain clamped at {POSTGRES_INT_MIN}, got {edge_result}" - ) + assert ( + edge_result == POSTGRES_INT_MIN + ), f"Expected balance to remain clamped at {POSTGRES_INT_MIN}, got {edge_result}" finally: await cleanup_test_user(user_id) @@ -174,18 +176,18 @@ async def test_underflow_protection_large_refunds(server: SpinTestServer): ) # Balance should be clamped to POSTGRES_INT_MIN, not the calculated underflow value - assert final_balance == POSTGRES_INT_MIN, ( - f"Balance should be clamped to {POSTGRES_INT_MIN}, got {final_balance}" - ) - assert final_balance > expected_without_protection, ( - f"Balance should be greater than underflow result {expected_without_protection}, got {final_balance}" - ) + assert ( + final_balance == POSTGRES_INT_MIN + ), f"Balance should be clamped to {POSTGRES_INT_MIN}, got {final_balance}" + assert ( + final_balance > expected_without_protection + ), f"Balance should be greater than underflow result {expected_without_protection}, got {final_balance}" # Verify with get_credits too stored_balance = await credit_system.get_credits(user_id) - assert stored_balance == POSTGRES_INT_MIN, ( - f"Stored balance should be {POSTGRES_INT_MIN}, got {stored_balance}" - ) + assert ( + stored_balance == POSTGRES_INT_MIN + ), f"Stored balance should be {POSTGRES_INT_MIN}, got {stored_balance}" # Verify transaction was created with the underflow-protected balance transactions = await CreditTransaction.prisma().find_many( @@ -193,9 +195,9 @@ async def test_underflow_protection_large_refunds(server: SpinTestServer): order={"createdAt": "desc"}, ) assert len(transactions) > 0, "Refund transaction should be created" - assert transactions[0].runningBalance == POSTGRES_INT_MIN, ( - f"Transaction should show clamped balance {POSTGRES_INT_MIN}, got 
{transactions[0].runningBalance}" - ) + assert ( + transactions[0].runningBalance == POSTGRES_INT_MIN + ), f"Transaction should show clamped balance {POSTGRES_INT_MIN}, got {transactions[0].runningBalance}" finally: await cleanup_test_user(user_id) @@ -236,12 +238,12 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe expected_balance_1 = ( initial_balance + refund_amount ) # Should be POSTGRES_INT_MIN + 200 - assert balance_1 == expected_balance_1, ( - f"First refund should result in {expected_balance_1}, got {balance_1}" - ) - assert balance_1 >= POSTGRES_INT_MIN, ( - f"First refund should not go below {POSTGRES_INT_MIN}, got {balance_1}" - ) + assert ( + balance_1 == expected_balance_1 + ), f"First refund should result in {expected_balance_1}, got {balance_1}" + assert ( + balance_1 >= POSTGRES_INT_MIN + ), f"First refund should not go below {POSTGRES_INT_MIN}, got {balance_1}" # Second refund: (POSTGRES_INT_MIN + 200) + (-300) = POSTGRES_INT_MIN - 100 (would underflow) balance_2, _ = await credit_system._add_transaction( @@ -252,9 +254,9 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe ) # Should be clamped to minimum due to underflow protection - assert balance_2 == POSTGRES_INT_MIN, ( - f"Second refund should be clamped to {POSTGRES_INT_MIN}, got {balance_2}" - ) + assert ( + balance_2 == POSTGRES_INT_MIN + ), f"Second refund should be clamped to {POSTGRES_INT_MIN}, got {balance_2}" # Third refund: Should stay at minimum balance_3, _ = await credit_system._add_transaction( @@ -265,15 +267,15 @@ async def test_multiple_large_refunds_cumulative_underflow(server: SpinTestServe ) # Should still be at minimum - assert balance_3 == POSTGRES_INT_MIN, ( - f"Third refund should stay at {POSTGRES_INT_MIN}, got {balance_3}" - ) + assert ( + balance_3 == POSTGRES_INT_MIN + ), f"Third refund should stay at {POSTGRES_INT_MIN}, got {balance_3}" # Final balance check final_balance = await credit_system.get_credits(user_id) - assert final_balance == POSTGRES_INT_MIN, ( - f"Final balance should be {POSTGRES_INT_MIN}, got {final_balance}" - ) + assert ( + final_balance == POSTGRES_INT_MIN + ), f"Final balance should be {POSTGRES_INT_MIN}, got {final_balance}" finally: await cleanup_test_user(user_id) @@ -325,35 +327,35 @@ async def test_concurrent_large_refunds_no_underflow(server: SpinTestServer): for i, result in enumerate(results): if isinstance(result, tuple): balance, _ = result - assert balance >= POSTGRES_INT_MIN, ( - f"Result {i} balance {balance} underflowed below {POSTGRES_INT_MIN}" - ) + assert ( + balance >= POSTGRES_INT_MIN + ), f"Result {i} balance {balance} underflowed below {POSTGRES_INT_MIN}" valid_results.append(balance) elif isinstance(result, str) and "FAILED" in result: # Some operations might fail due to validation, that's okay pass else: # Unexpected exception - assert not isinstance(result, Exception), ( - f"Unexpected exception in result {i}: {result}" - ) + assert not isinstance( + result, Exception + ), f"Unexpected exception in result {i}: {result}" # At least one operation should succeed - assert len(valid_results) > 0, ( - f"At least one refund should succeed, got results: {results}" - ) + assert ( + len(valid_results) > 0 + ), f"At least one refund should succeed, got results: {results}" # All successful results should be >= POSTGRES_INT_MIN for balance in valid_results: - assert balance >= POSTGRES_INT_MIN, ( - f"Balance {balance} should not be below {POSTGRES_INT_MIN}" - ) + assert ( + balance >= 
POSTGRES_INT_MIN + ), f"Balance {balance} should not be below {POSTGRES_INT_MIN}" # Final balance should be valid and at or above POSTGRES_INT_MIN final_balance = await credit_system.get_credits(user_id) - assert final_balance >= POSTGRES_INT_MIN, ( - f"Final balance {final_balance} should not underflow below {POSTGRES_INT_MIN}" - ) + assert ( + final_balance >= POSTGRES_INT_MIN + ), f"Final balance {final_balance} should not underflow below {POSTGRES_INT_MIN}" finally: await cleanup_test_user(user_id) diff --git a/autogpt_platform/backend/backend/data/credit_user_balance_migration_test.py b/autogpt_platform/backend/backend/data/credit_user_balance_migration_test.py index f0c196a938..2316b4d7c4 100644 --- a/autogpt_platform/backend/backend/data/credit_user_balance_migration_test.py +++ b/autogpt_platform/backend/backend/data/credit_user_balance_migration_test.py @@ -60,9 +60,9 @@ async def test_user_balance_migration_complete(server: SpinTestServer): # User.balance should not exist or should be None/0 if it exists user_balance_attr = getattr(user, "balance", None) if user_balance_attr is not None: - assert user_balance_attr == 0 or user_balance_attr is None, ( - f"User.balance should be 0 or None, got {user_balance_attr}" - ) + assert ( + user_balance_attr == 0 or user_balance_attr is None + ), f"User.balance should be 0 or None, got {user_balance_attr}" # 2. Perform various credit operations using internal method (bypasses Stripe) await credit_system._add_transaction( @@ -87,9 +87,9 @@ async def test_user_balance_migration_complete(server: SpinTestServer): # 3. Verify UserBalance table has correct values user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id}) assert user_balance is not None - assert user_balance.balance == 700, ( - f"UserBalance should be 700, got {user_balance.balance}" - ) + assert ( + user_balance.balance == 700 + ), f"UserBalance should be 700, got {user_balance.balance}" # 4. CRITICAL: Verify User.balance is NEVER updated during operations user_after = await User.prisma().find_unique(where={"id": user_id}) @@ -97,15 +97,15 @@ async def test_user_balance_migration_complete(server: SpinTestServer): user_balance_after = getattr(user_after, "balance", None) if user_balance_after is not None: # If User.balance exists, it should still be 0 (never updated) - assert user_balance_after == 0 or user_balance_after is None, ( - f"User.balance should remain 0/None after operations, got {user_balance_after}. This indicates User.balance is still being used!" - ) + assert ( + user_balance_after == 0 or user_balance_after is None + ), f"User.balance should remain 0/None after operations, got {user_balance_after}. This indicates User.balance is still being used!" # 5. 
Verify get_credits always returns UserBalance value, not User.balance final_balance = await credit_system.get_credits(user_id) - assert final_balance == user_balance.balance, ( - f"get_credits should return UserBalance value {user_balance.balance}, got {final_balance}" - ) + assert ( + final_balance == user_balance.balance + ), f"get_credits should return UserBalance value {user_balance.balance}, got {final_balance}" finally: await cleanup_test_user(user_id) @@ -126,9 +126,9 @@ async def test_detect_stale_user_balance_queries(server: SpinTestServer): # Verify that get_credits returns UserBalance value (5000), not any stale User.balance value balance = await credit_system.get_credits(user_id) - assert balance == 5000, ( - f"Expected get_credits to return 5000 from UserBalance, got {balance}" - ) + assert ( + balance == 5000 + ), f"Expected get_credits to return 5000 from UserBalance, got {balance}" # Verify all operations use UserBalance using internal method (bypasses Stripe) await credit_system._add_transaction( @@ -143,9 +143,9 @@ async def test_detect_stale_user_balance_queries(server: SpinTestServer): # Verify UserBalance table has the correct value user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id}) assert user_balance is not None - assert user_balance.balance == 6000, ( - f"UserBalance should be 6000, got {user_balance.balance}" - ) + assert ( + user_balance.balance == 6000 + ), f"UserBalance should be 6000, got {user_balance.balance}" finally: await cleanup_test_user(user_id) @@ -196,9 +196,9 @@ async def test_concurrent_operations_use_userbalance_only(server: SpinTestServer # Verify UserBalance has correct value user_balance = await UserBalance.prisma().find_unique(where={"userId": user_id}) assert user_balance is not None - assert user_balance.balance == 400, ( - f"UserBalance should be 400, got {user_balance.balance}" - ) + assert ( + user_balance.balance == 400 + ), f"UserBalance should be 400, got {user_balance.balance}" # Critical: If User.balance exists and was used, it might have wrong value try: diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 527f48236f..2433a5d270 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -569,6 +569,7 @@ class GraphModel(Graph, GraphMeta): field_name, field_info, ) in node.block.input_schema.get_credentials_fields_info().items(): + discriminator = field_info.discriminator if not discriminator: node_credential_data.append((field_info, (node.id, field_name))) @@ -835,9 +836,9 @@ class GraphModel(Graph, GraphMeta): # Check for missing dependencies when dependent field is present missing_deps = [dep for dep in dependencies if not has_value(node, dep)] if missing_deps and (field_has_value or field_is_required): - node_errors[node.id][field_name] = ( - f"Requires {', '.join(missing_deps)} to be set" - ) + node_errors[node.id][ + field_name + ] = f"Requires {', '.join(missing_deps)} to be set" return node_errors diff --git a/autogpt_platform/backend/backend/data/model.py b/autogpt_platform/backend/backend/data/model.py index f43de44706..7bdfef059b 100644 --- a/autogpt_platform/backend/backend/data/model.py +++ b/autogpt_platform/backend/backend/data/model.py @@ -468,6 +468,7 @@ class UserMetadataRaw(TypedDict, total=False): class UserIntegrations(BaseModel): + class ManagedCredentials(BaseModel): """Integration credentials managed by us, rather than by the user""" diff --git 
a/autogpt_platform/backend/backend/data/workspace.py b/autogpt_platform/backend/backend/data/workspace.py index e49d5c5223..f3dba0a294 100644 --- a/autogpt_platform/backend/backend/data/workspace.py +++ b/autogpt_platform/backend/backend/data/workspace.py @@ -100,7 +100,8 @@ async def create_workspace_file( ) logger.info( - f"Created workspace file {file.id} at path {path} in workspace {workspace_id}" + f"Created workspace file {file.id} at path {path} " + f"in workspace {workspace_id}" ) return file diff --git a/autogpt_platform/backend/backend/executor/activity_status_generator_test.py b/autogpt_platform/backend/backend/executor/activity_status_generator_test.py index 50adbae660..c3ce0b6bf0 100644 --- a/autogpt_platform/backend/backend/executor/activity_status_generator_test.py +++ b/autogpt_platform/backend/backend/executor/activity_status_generator_test.py @@ -224,9 +224,7 @@ class TestBuildExecutionSummary: # Check that errors are now in node's recent_errors field # Find the output node (with truncated UUID) output_node = next( - n - for n in summary["nodes"] - if n["node_id"] == "678e9012" # Truncated + n for n in summary["nodes"] if n["node_id"] == "678e9012" # Truncated ) assert output_node["error_count"] == 1 assert output_node["execution_count"] == 1 @@ -354,9 +352,7 @@ class TestBuildExecutionSummary: # String error format - find node with truncated ID string_error_node = next( - n - for n in summary["nodes"] - if n["node_id"] == "333e4444" # Truncated + n for n in summary["nodes"] if n["node_id"] == "333e4444" # Truncated ) assert len(string_error_node["recent_errors"]) == 1 assert ( @@ -366,9 +362,7 @@ class TestBuildExecutionSummary: # No error output format - find node with truncated ID no_error_node = next( - n - for n in summary["nodes"] - if n["node_id"] == "777e8888" # Truncated + n for n in summary["nodes"] if n["node_id"] == "777e8888" # Truncated ) assert len(no_error_node["recent_errors"]) == 1 assert no_error_node["recent_errors"][0]["error"] == "Unknown error" @@ -385,9 +379,8 @@ class TestLLMCall: from backend.blocks.llm import AIStructuredResponseGeneratorBlock from backend.data.model import APIKeyCredentials - with ( - patch("backend.blocks.llm.llm_call") as mock_llm_call, - patch("backend.blocks.llm.secrets.token_hex", return_value="test123"), + with patch("backend.blocks.llm.llm_call") as mock_llm_call, patch( + "backend.blocks.llm.secrets.token_hex", return_value="test123" ): mock_llm_call.return_value = LLMResponse( raw_response={}, @@ -449,9 +442,8 @@ class TestLLMCall: from backend.blocks.llm import AIStructuredResponseGeneratorBlock from backend.data.model import APIKeyCredentials - with ( - patch("backend.blocks.llm.llm_call") as mock_llm_call, - patch("backend.blocks.llm.secrets.token_hex", return_value="test123"), + with patch("backend.blocks.llm.llm_call") as mock_llm_call, patch( + "backend.blocks.llm.secrets.token_hex", return_value="test123" ): # Return invalid JSON that will fail validation (missing required field) mock_llm_call.return_value = LLMResponse( @@ -523,21 +515,17 @@ class TestGenerateActivityStatusForExecution: mock_graph.links = [] mock_db_client.get_graph.return_value = mock_graph - with ( - patch( - "backend.executor.activity_status_generator.get_block" - ) as mock_get_block, - patch( - "backend.executor.activity_status_generator.Settings" - ) as mock_settings, - patch( - "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock" - ) as mock_structured_block, - patch( - 
"backend.executor.activity_status_generator.is_feature_enabled", - return_value=True, - ), + with patch( + "backend.executor.activity_status_generator.get_block" + ) as mock_get_block, patch( + "backend.executor.activity_status_generator.Settings" + ) as mock_settings, patch( + "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock" + ) as mock_structured_block, patch( + "backend.executor.activity_status_generator.is_feature_enabled", + return_value=True, ): + mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id) mock_settings.return_value.secrets.openai_internal_api_key = "test_key" @@ -545,13 +533,10 @@ class TestGenerateActivityStatusForExecution: mock_instance = mock_structured_block.return_value async def mock_run(*args, **kwargs): - yield ( - "response", - { - "activity_status": "I analyzed your data and provided the requested insights.", - "correctness_score": 0.85, - }, - ) + yield "response", { + "activity_status": "I analyzed your data and provided the requested insights.", + "correctness_score": 0.85, + } mock_instance.run = mock_run @@ -601,14 +586,11 @@ class TestGenerateActivityStatusForExecution: """Test activity status generation with no API key.""" mock_db_client = AsyncMock() - with ( - patch( - "backend.executor.activity_status_generator.Settings" - ) as mock_settings, - patch( - "backend.executor.activity_status_generator.is_feature_enabled", - return_value=True, - ), + with patch( + "backend.executor.activity_status_generator.Settings" + ) as mock_settings, patch( + "backend.executor.activity_status_generator.is_feature_enabled", + return_value=True, ): mock_settings.return_value.secrets.openai_internal_api_key = "" @@ -630,14 +612,11 @@ class TestGenerateActivityStatusForExecution: mock_db_client = AsyncMock() mock_db_client.get_node_executions.side_effect = Exception("Database error") - with ( - patch( - "backend.executor.activity_status_generator.Settings" - ) as mock_settings, - patch( - "backend.executor.activity_status_generator.is_feature_enabled", - return_value=True, - ), + with patch( + "backend.executor.activity_status_generator.Settings" + ) as mock_settings, patch( + "backend.executor.activity_status_generator.is_feature_enabled", + return_value=True, ): mock_settings.return_value.secrets.openai_internal_api_key = "test_key" @@ -662,21 +641,17 @@ class TestGenerateActivityStatusForExecution: mock_db_client.get_graph_metadata.return_value = None # No metadata mock_db_client.get_graph.return_value = None # No graph - with ( - patch( - "backend.executor.activity_status_generator.get_block" - ) as mock_get_block, - patch( - "backend.executor.activity_status_generator.Settings" - ) as mock_settings, - patch( - "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock" - ) as mock_structured_block, - patch( - "backend.executor.activity_status_generator.is_feature_enabled", - return_value=True, - ), + with patch( + "backend.executor.activity_status_generator.get_block" + ) as mock_get_block, patch( + "backend.executor.activity_status_generator.Settings" + ) as mock_settings, patch( + "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock" + ) as mock_structured_block, patch( + "backend.executor.activity_status_generator.is_feature_enabled", + return_value=True, ): + mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id) mock_settings.return_value.secrets.openai_internal_api_key = "test_key" @@ -684,13 +659,10 @@ class TestGenerateActivityStatusForExecution: 
mock_instance = mock_structured_block.return_value async def mock_run(*args, **kwargs): - yield ( - "response", - { - "activity_status": "Agent completed execution.", - "correctness_score": 0.8, - }, - ) + yield "response", { + "activity_status": "Agent completed execution.", + "correctness_score": 0.8, + } mock_instance.run = mock_run @@ -732,21 +704,17 @@ class TestIntegration: expected_activity = "I processed user input but failed during final output generation due to system error." - with ( - patch( - "backend.executor.activity_status_generator.get_block" - ) as mock_get_block, - patch( - "backend.executor.activity_status_generator.Settings" - ) as mock_settings, - patch( - "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock" - ) as mock_structured_block, - patch( - "backend.executor.activity_status_generator.is_feature_enabled", - return_value=True, - ), + with patch( + "backend.executor.activity_status_generator.get_block" + ) as mock_get_block, patch( + "backend.executor.activity_status_generator.Settings" + ) as mock_settings, patch( + "backend.executor.activity_status_generator.AIStructuredResponseGeneratorBlock" + ) as mock_structured_block, patch( + "backend.executor.activity_status_generator.is_feature_enabled", + return_value=True, ): + mock_get_block.side_effect = lambda block_id: mock_blocks.get(block_id) mock_settings.return_value.secrets.openai_internal_api_key = "test_key" @@ -754,13 +722,10 @@ class TestIntegration: mock_instance = mock_structured_block.return_value async def mock_run(*args, **kwargs): - yield ( - "response", - { - "activity_status": expected_activity, - "correctness_score": 0.3, # Low score since there was a failure - }, - ) + yield "response", { + "activity_status": expected_activity, + "correctness_score": 0.3, # Low score since there was a failure + } mock_instance.run = mock_run diff --git a/autogpt_platform/backend/backend/executor/automod/manager.py b/autogpt_platform/backend/backend/executor/automod/manager.py index 392aa34e03..81001196dd 100644 --- a/autogpt_platform/backend/backend/executor/automod/manager.py +++ b/autogpt_platform/backend/backend/executor/automod/manager.py @@ -20,6 +20,7 @@ logger = logging.getLogger(__name__) class AutoModManager: + def __init__(self): self.config = self._load_config() diff --git a/autogpt_platform/backend/backend/executor/manager_insufficient_funds_test.py b/autogpt_platform/backend/backend/executor/manager_insufficient_funds_test.py index 86a7d78adf..276c9f4f7a 100644 --- a/autogpt_platform/backend/backend/executor/manager_insufficient_funds_test.py +++ b/autogpt_platform/backend/backend/executor/manager_insufficient_funds_test.py @@ -35,14 +35,16 @@ async def test_handle_insufficient_funds_sends_discord_alert_first_time( amount=-714, # Attempting to spend $7.14 ) - with ( - patch("backend.executor.manager.queue_notification") as mock_queue_notif, - patch( - "backend.executor.manager.get_notification_manager_client" - ) as mock_get_client, - patch("backend.executor.manager.settings") as mock_settings, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch( + "backend.executor.manager.queue_notification" + ) as mock_queue_notif, patch( + "backend.executor.manager.get_notification_manager_client" + ) as mock_get_client, patch( + "backend.executor.manager.settings" + ) as mock_settings, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + # Setup mocks mock_client = MagicMock() mock_get_client.return_value = mock_client @@ -107,14 +109,16 
@@ async def test_handle_insufficient_funds_skips_duplicate_notifications( amount=-714, ) - with ( - patch("backend.executor.manager.queue_notification") as mock_queue_notif, - patch( - "backend.executor.manager.get_notification_manager_client" - ) as mock_get_client, - patch("backend.executor.manager.settings") as mock_settings, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch( + "backend.executor.manager.queue_notification" + ) as mock_queue_notif, patch( + "backend.executor.manager.get_notification_manager_client" + ) as mock_get_client, patch( + "backend.executor.manager.settings" + ) as mock_settings, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + # Setup mocks mock_client = MagicMock() mock_get_client.return_value = mock_client @@ -162,14 +166,14 @@ async def test_handle_insufficient_funds_different_agents_get_separate_alerts( amount=-714, ) - with ( - patch("backend.executor.manager.queue_notification"), - patch( - "backend.executor.manager.get_notification_manager_client" - ) as mock_get_client, - patch("backend.executor.manager.settings") as mock_settings, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch("backend.executor.manager.queue_notification"), patch( + "backend.executor.manager.get_notification_manager_client" + ) as mock_get_client, patch( + "backend.executor.manager.settings" + ) as mock_settings, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + mock_client = MagicMock() mock_get_client.return_value = mock_client mock_settings.config.frontend_base_url = "https://test.com" @@ -224,6 +228,7 @@ async def test_clear_insufficient_funds_notifications(server: SpinTestServer): user_id = "test-user-123" with patch("backend.executor.manager.redis") as mock_redis_module: + mock_redis_client = MagicMock() # get_redis_async is an async function, so we need AsyncMock for it mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client) @@ -259,6 +264,7 @@ async def test_clear_insufficient_funds_notifications_no_keys(server: SpinTestSe user_id = "test-user-no-notifications" with patch("backend.executor.manager.redis") as mock_redis_module: + mock_redis_client = MagicMock() # get_redis_async is an async function, so we need AsyncMock for it mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client) @@ -285,6 +291,7 @@ async def test_clear_insufficient_funds_notifications_handles_redis_error( user_id = "test-user-redis-error" with patch("backend.executor.manager.redis") as mock_redis_module: + # Mock get_redis_async to raise an error mock_redis_module.get_redis_async = AsyncMock( side_effect=Exception("Redis connection failed") @@ -313,14 +320,16 @@ async def test_handle_insufficient_funds_continues_on_redis_error( amount=-714, ) - with ( - patch("backend.executor.manager.queue_notification") as mock_queue_notif, - patch( - "backend.executor.manager.get_notification_manager_client" - ) as mock_get_client, - patch("backend.executor.manager.settings") as mock_settings, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch( + "backend.executor.manager.queue_notification" + ) as mock_queue_notif, patch( + "backend.executor.manager.get_notification_manager_client" + ) as mock_get_client, patch( + "backend.executor.manager.settings" + ) as mock_settings, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + mock_client = MagicMock() mock_get_client.return_value = mock_client 
mock_settings.config.frontend_base_url = "https://test.com" @@ -360,10 +369,10 @@ async def test_add_transaction_clears_notifications_on_grant(server: SpinTestSer user_id = "test-user-grant-clear" - with ( - patch("backend.data.credit.query_raw_with_schema") as mock_query, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + # Mock the query to return a successful transaction mock_query.return_value = [{"balance": 1000, "transactionKey": "test-tx-key"}] @@ -402,10 +411,10 @@ async def test_add_transaction_clears_notifications_on_top_up(server: SpinTestSe user_id = "test-user-topup-clear" - with ( - patch("backend.data.credit.query_raw_with_schema") as mock_query, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + # Mock the query to return a successful transaction mock_query.return_value = [{"balance": 2000, "transactionKey": "test-tx-key-2"}] @@ -440,10 +449,10 @@ async def test_add_transaction_skips_clearing_for_inactive_transaction( user_id = "test-user-inactive" - with ( - patch("backend.data.credit.query_raw_with_schema") as mock_query, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + # Mock the query to return a successful transaction mock_query.return_value = [{"balance": 500, "transactionKey": "test-tx-key-3"}] @@ -476,10 +485,10 @@ async def test_add_transaction_skips_clearing_for_usage_transaction( user_id = "test-user-usage" - with ( - patch("backend.data.credit.query_raw_with_schema") as mock_query, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch( + "backend.executor.manager.redis" + ) as mock_redis_module: + # Mock the query to return a successful transaction mock_query.return_value = [{"balance": 400, "transactionKey": "test-tx-key-4"}] @@ -510,11 +519,10 @@ async def test_enable_transaction_clears_notifications(server: SpinTestServer): user_id = "test-user-enable" - with ( - patch("backend.data.credit.CreditTransaction") as mock_credit_tx, - patch("backend.data.credit.query_raw_with_schema") as mock_query, - patch("backend.executor.manager.redis") as mock_redis_module, - ): + with patch("backend.data.credit.CreditTransaction") as mock_credit_tx, patch( + "backend.data.credit.query_raw_with_schema" + ) as mock_query, patch("backend.executor.manager.redis") as mock_redis_module: + # Mock finding the pending transaction mock_transaction = MagicMock() mock_transaction.amount = 1000 diff --git a/autogpt_platform/backend/backend/executor/manager_low_balance_test.py b/autogpt_platform/backend/backend/executor/manager_low_balance_test.py index 50db2d2808..d51ffb2511 100644 --- a/autogpt_platform/backend/backend/executor/manager_low_balance_test.py +++ b/autogpt_platform/backend/backend/executor/manager_low_balance_test.py @@ -18,13 +18,14 @@ async def test_handle_low_balance_threshold_crossing(server: SpinTestServer): transaction_cost = 600 # $6 transaction # Mock dependencies - with ( - patch("backend.executor.manager.queue_notification") as mock_queue_notif, - patch( - 
"backend.executor.manager.get_notification_manager_client" - ) as mock_get_client, - patch("backend.executor.manager.settings") as mock_settings, - ): + with patch( + "backend.executor.manager.queue_notification" + ) as mock_queue_notif, patch( + "backend.executor.manager.get_notification_manager_client" + ) as mock_get_client, patch( + "backend.executor.manager.settings" + ) as mock_settings: + # Setup mocks mock_client = MagicMock() mock_get_client.return_value = mock_client @@ -76,13 +77,14 @@ async def test_handle_low_balance_no_notification_when_not_crossing( ) # Mock dependencies - with ( - patch("backend.executor.manager.queue_notification") as mock_queue_notif, - patch( - "backend.executor.manager.get_notification_manager_client" - ) as mock_get_client, - patch("backend.executor.manager.settings") as mock_settings, - ): + with patch( + "backend.executor.manager.queue_notification" + ) as mock_queue_notif, patch( + "backend.executor.manager.get_notification_manager_client" + ) as mock_get_client, patch( + "backend.executor.manager.settings" + ) as mock_settings: + # Setup mocks mock_client = MagicMock() mock_get_client.return_value = mock_client @@ -118,13 +120,14 @@ async def test_handle_low_balance_no_duplicate_when_already_below( ) # Mock dependencies - with ( - patch("backend.executor.manager.queue_notification") as mock_queue_notif, - patch( - "backend.executor.manager.get_notification_manager_client" - ) as mock_get_client, - patch("backend.executor.manager.settings") as mock_settings, - ): + with patch( + "backend.executor.manager.queue_notification" + ) as mock_queue_notif, patch( + "backend.executor.manager.get_notification_manager_client" + ) as mock_get_client, patch( + "backend.executor.manager.settings" + ) as mock_settings: + # Setup mocks mock_client = MagicMock() mock_get_client.return_value = mock_client diff --git a/autogpt_platform/backend/backend/executor/manager_test.py b/autogpt_platform/backend/backend/executor/manager_test.py index 4805e3054a..69deba4b00 100644 --- a/autogpt_platform/backend/backend/executor/manager_test.py +++ b/autogpt_platform/backend/backend/executor/manager_test.py @@ -92,12 +92,12 @@ async def assert_sample_graph_executions( logger.info(f"Checking first StoreValueBlock execution: {exec}") assert exec.status == execution.ExecutionStatus.COMPLETED assert exec.graph_exec_id == graph_exec_id - assert exec.output_data in output_list, ( - f"Output data: {exec.output_data} and {output_list}" - ) - assert exec.input_data in input_list, ( - f"Input data: {exec.input_data} and {input_list}" - ) + assert ( + exec.output_data in output_list + ), f"Output data: {exec.output_data} and {output_list}" + assert ( + exec.input_data in input_list + ), f"Input data: {exec.input_data} and {input_list}" assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id] # Executing StoreValueBlock @@ -105,12 +105,12 @@ async def assert_sample_graph_executions( logger.info(f"Checking second StoreValueBlock execution: {exec}") assert exec.status == execution.ExecutionStatus.COMPLETED assert exec.graph_exec_id == graph_exec_id - assert exec.output_data in output_list, ( - f"Output data: {exec.output_data} and {output_list}" - ) - assert exec.input_data in input_list, ( - f"Input data: {exec.input_data} and {input_list}" - ) + assert ( + exec.output_data in output_list + ), f"Output data: {exec.output_data} and {output_list}" + assert ( + exec.input_data in input_list + ), f"Input data: {exec.input_data} and {input_list}" assert exec.node_id in 
[test_graph.nodes[0].id, test_graph.nodes[1].id] # Executing FillTextTemplateBlock @@ -301,7 +301,7 @@ async def test_static_input_link_on_graph(server: SpinTestServer): assert len(graph_exec.node_executions) == 8 # The last 3 executions will be a+b=4+5=9 for i, exec_data in enumerate(graph_exec.node_executions[-3:]): - logger.info(f"Checking execution {i + 1} of last 3: {exec_data}") + logger.info(f"Checking execution {i+1} of last 3: {exec_data}") assert exec_data.status == execution.ExecutionStatus.COMPLETED assert exec_data.output_data == {"result": [9]} logger.info("Completed test_static_input_link_on_graph") diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index 958f98f19e..d26424aefc 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -292,9 +292,9 @@ async def _validate_node_input_credentials( if node.credentials_optional: continue # Don't add error, will be marked for skip after loop else: - credential_errors[node.id][field_name] = ( - "These credentials are required" - ) + credential_errors[node.id][ + field_name + ] = "These credentials are required" continue credentials_meta = credentials_meta_type.model_validate(field_value) @@ -313,15 +313,15 @@ async def _validate_node_input_credentials( except Exception as e: # Handle any errors fetching credentials # If credentials were explicitly configured but unavailable, it's an error - credential_errors[node.id][field_name] = ( - f"Credentials not available: {e}" - ) + credential_errors[node.id][ + field_name + ] = f"Credentials not available: {e}" continue if not credentials: - credential_errors[node.id][field_name] = ( - f"Unknown credentials #{credentials_meta.id}" - ) + credential_errors[node.id][ + field_name + ] = f"Unknown credentials #{credentials_meta.id}" continue if ( @@ -334,9 +334,9 @@ async def _validate_node_input_credentials( f"{credentials_meta.type}<>{credentials.type};" f"{credentials_meta.provider}<>{credentials.provider}" ) - credential_errors[node.id][field_name] = ( - "Invalid credentials: type/provider mismatch" - ) + credential_errors[node.id][ + field_name + ] = "Invalid credentials: type/provider mismatch" continue # If node has optional credentials and any are missing, mark for skipping @@ -410,10 +410,9 @@ async def validate_graph_with_credentials( ) # Get credential input/availability/validation errors and nodes to skip - ( - node_credential_input_errors, - nodes_to_skip, - ) = await _validate_node_input_credentials(graph, user_id, nodes_input_masks) + node_credential_input_errors, nodes_to_skip = ( + await _validate_node_input_credentials(graph, user_id, nodes_input_masks) + ) # Merge credential errors with structural errors for node_id, field_errors in node_credential_input_errors.items(): @@ -561,14 +560,13 @@ async def validate_and_construct_node_execution_input( nodes_input_masks or {}, ) - ( - starting_nodes_input, - nodes_to_skip, - ) = await _construct_starting_node_execution_input( - graph=graph, - user_id=user_id, - graph_inputs=graph_inputs, - nodes_input_masks=nodes_input_masks, + starting_nodes_input, nodes_to_skip = ( + await _construct_starting_node_execution_input( + graph=graph, + user_id=user_id, + graph_inputs=graph_inputs, + nodes_input_masks=nodes_input_masks, + ) ) return graph, starting_nodes_input, nodes_input_masks, nodes_to_skip @@ -632,7 +630,8 @@ def create_execution_queue_config() -> RabbitMQConfig: # Solution: Disable consumer timeout 
entirely - let graphs run indefinitely # Safety: Heartbeat mechanism now handles dead consumer detection instead # Use case: Graph executions that take hours to complete (AI model training, etc.) - "x-consumer-timeout": GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS * 1000, + "x-consumer-timeout": GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS + * 1000, }, ) cancel_queue = Queue( @@ -858,19 +857,16 @@ async def add_graph_execution( ) # Create new execution - ( - graph, - starting_nodes_input, - compiled_nodes_input_masks, - nodes_to_skip, - ) = await validate_and_construct_node_execution_input( - graph_id=graph_id, - user_id=user_id, - graph_inputs=inputs or {}, - graph_version=graph_version, - graph_credentials_inputs=graph_credentials_inputs, - nodes_input_masks=nodes_input_masks, - is_sub_graph=parent_exec_id is not None, + graph, starting_nodes_input, compiled_nodes_input_masks, nodes_to_skip = ( + await validate_and_construct_node_execution_input( + graph_id=graph_id, + user_id=user_id, + graph_inputs=inputs or {}, + graph_version=graph_version, + graph_credentials_inputs=graph_credentials_inputs, + nodes_input_masks=nodes_input_masks, + is_sub_graph=parent_exec_id is not None, + ) ) graph_exec = await edb.create_graph_execution( diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index fa544e8aa8..384405b0c7 100644 --- a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -486,6 +486,7 @@ class IntegrationCredentialsStore: user_integrations.oauth_states.append(state) async with await self.locked_user_integrations(user_id): + user_integrations = await self._get_user_integrations(user_id) oauth_states = user_integrations.oauth_states oauth_states.append(state) diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py index 9b75450c20..f2b6a9da4f 100644 --- a/autogpt_platform/backend/backend/integrations/creds_manager.py +++ b/autogpt_platform/backend/backend/integrations/creds_manager.py @@ -140,7 +140,8 @@ class IntegrationCredentialsManager: oauth_handler = await _get_provider_oauth_handler(credentials.provider) if oauth_handler.needs_refresh(credentials): logger.debug( - f"Refreshing '{credentials.provider}' credentials #{credentials.id}" + f"Refreshing '{credentials.provider}' " + f"credentials #{credentials.id}" ) _lock = None if lock: diff --git a/autogpt_platform/backend/backend/notifications/test_notifications.py b/autogpt_platform/backend/backend/notifications/test_notifications.py index 6d6ed7cb1b..9827b46cfb 100644 --- a/autogpt_platform/backend/backend/notifications/test_notifications.py +++ b/autogpt_platform/backend/backend/notifications/test_notifications.py @@ -77,23 +77,18 @@ class TestNotificationErrorHandling: self, notification_manager, sample_batch_event ): """Test that 406 inactive recipient error stops ALL processing for that user.""" - with ( - patch("backend.notifications.notifications.logger"), - patch( - "backend.notifications.notifications.set_user_email_verification", - new_callable=AsyncMock, - ) as mock_set_verification, - patch( - "backend.notifications.notifications.disable_all_user_notifications", - new_callable=AsyncMock, - ) as mock_disable_all, - patch( - "backend.notifications.notifications.get_database_manager_async_client" - ) as mock_db_client, - patch( - 
"backend.notifications.notifications.generate_unsubscribe_link" - ) as mock_unsub_link, - ): + with patch("backend.notifications.notifications.logger"), patch( + "backend.notifications.notifications.set_user_email_verification", + new_callable=AsyncMock, + ) as mock_set_verification, patch( + "backend.notifications.notifications.disable_all_user_notifications", + new_callable=AsyncMock, + ) as mock_disable_all, patch( + "backend.notifications.notifications.get_database_manager_async_client" + ) as mock_db_client, patch( + "backend.notifications.notifications.generate_unsubscribe_link" + ) as mock_unsub_link: + # Create batch of 5 notifications notifications = [] for i in range(5): @@ -174,15 +169,12 @@ class TestNotificationErrorHandling: self, notification_manager, sample_batch_event ): """Test that 422 error permanently removes the malformed notification from batch and continues with others.""" - with ( - patch("backend.notifications.notifications.logger") as mock_logger, - patch( - "backend.notifications.notifications.get_database_manager_async_client" - ) as mock_db_client, - patch( - "backend.notifications.notifications.generate_unsubscribe_link" - ) as mock_unsub_link, - ): + with patch("backend.notifications.notifications.logger") as mock_logger, patch( + "backend.notifications.notifications.get_database_manager_async_client" + ) as mock_db_client, patch( + "backend.notifications.notifications.generate_unsubscribe_link" + ) as mock_unsub_link: + # Create batch of 5 notifications notifications = [] for i in range(5): @@ -280,15 +272,12 @@ class TestNotificationErrorHandling: self, notification_manager, sample_batch_event ): """Test that oversized notifications are permanently removed from batch but others continue.""" - with ( - patch("backend.notifications.notifications.logger") as mock_logger, - patch( - "backend.notifications.notifications.get_database_manager_async_client" - ) as mock_db_client, - patch( - "backend.notifications.notifications.generate_unsubscribe_link" - ) as mock_unsub_link, - ): + with patch("backend.notifications.notifications.logger") as mock_logger, patch( + "backend.notifications.notifications.get_database_manager_async_client" + ) as mock_db_client, patch( + "backend.notifications.notifications.generate_unsubscribe_link" + ) as mock_unsub_link: + # Create batch of 5 notifications notifications = [] for i in range(5): @@ -393,15 +382,12 @@ class TestNotificationErrorHandling: self, notification_manager, sample_batch_event ): """Test that generic API errors keep notifications in batch for retry while others continue.""" - with ( - patch("backend.notifications.notifications.logger") as mock_logger, - patch( - "backend.notifications.notifications.get_database_manager_async_client" - ) as mock_db_client, - patch( - "backend.notifications.notifications.generate_unsubscribe_link" - ) as mock_unsub_link, - ): + with patch("backend.notifications.notifications.logger") as mock_logger, patch( + "backend.notifications.notifications.get_database_manager_async_client" + ) as mock_db_client, patch( + "backend.notifications.notifications.generate_unsubscribe_link" + ) as mock_unsub_link: + # Create batch of 5 notifications notifications = [] for i in range(5): @@ -513,15 +499,12 @@ class TestNotificationErrorHandling: self, notification_manager, sample_batch_event ): """Test successful batch processing where all notifications are sent without errors.""" - with ( - patch("backend.notifications.notifications.logger") as mock_logger, - patch( - 
"backend.notifications.notifications.get_database_manager_async_client" - ) as mock_db_client, - patch( - "backend.notifications.notifications.generate_unsubscribe_link" - ) as mock_unsub_link, - ): + with patch("backend.notifications.notifications.logger") as mock_logger, patch( + "backend.notifications.notifications.get_database_manager_async_client" + ) as mock_db_client, patch( + "backend.notifications.notifications.generate_unsubscribe_link" + ) as mock_unsub_link: + # Create batch of 5 notifications notifications = [] for i in range(5): diff --git a/autogpt_platform/backend/backend/sdk/__init__.py b/autogpt_platform/backend/backend/sdk/__init__.py index 3beb559124..b3a23dc735 100644 --- a/autogpt_platform/backend/backend/sdk/__init__.py +++ b/autogpt_platform/backend/backend/sdk/__init__.py @@ -6,7 +6,7 @@ Usage: from backend.sdk import * This module provides: - All block base classes and types -- All credential and authentication components +- All credential and authentication components - All cost tracking components - All webhook components - All utility functions diff --git a/autogpt_platform/backend/backend/sdk/cost_integration.py b/autogpt_platform/backend/backend/sdk/cost_integration.py index 09ae0a1467..04c027ffa3 100644 --- a/autogpt_platform/backend/backend/sdk/cost_integration.py +++ b/autogpt_platform/backend/backend/sdk/cost_integration.py @@ -1,7 +1,7 @@ """ Integration between SDK provider costs and the execution cost system. -This module provides the glue between provider-defined base costs and the +This module provides the glue between provider-defined base costs and the BLOCK_COSTS configuration used by the execution system. """ diff --git a/autogpt_platform/backend/backend/sdk/registry.py b/autogpt_platform/backend/backend/sdk/registry.py index b01a72549b..5543a3ed96 100644 --- a/autogpt_platform/backend/backend/sdk/registry.py +++ b/autogpt_platform/backend/backend/sdk/registry.py @@ -91,6 +91,7 @@ class AutoRegistry: not hasattr(provider.webhook_manager, "PROVIDER_NAME") or provider.webhook_manager.PROVIDER_NAME is None ): + # This works because ProviderName has _missing_ method provider.webhook_manager.PROVIDER_NAME = ProviderName(provider.name) cls._webhook_managers[provider.name] = provider.webhook_manager diff --git a/autogpt_platform/backend/backend/util/decorator.py b/autogpt_platform/backend/backend/util/decorator.py index 697d67e781..3767435646 100644 --- a/autogpt_platform/backend/backend/util/decorator.py +++ b/autogpt_platform/backend/backend/util/decorator.py @@ -168,9 +168,7 @@ def async_error_logged() -> Callable[ ]: ... 
-def async_error_logged( - *, swallow: bool = True -) -> ( +def async_error_logged(*, swallow: bool = True) -> ( Callable[ [Callable[P, Coroutine[Any, Any, T]]], Callable[P, Coroutine[Any, Any, T | None]], diff --git a/autogpt_platform/backend/backend/util/dynamic_fields.py b/autogpt_platform/backend/backend/util/dynamic_fields.py index ddb0ffd302..65be301e78 100644 --- a/autogpt_platform/backend/backend/util/dynamic_fields.py +++ b/autogpt_platform/backend/backend/util/dynamic_fields.py @@ -3,7 +3,7 @@ Utilities for handling dynamic field names and delimiters in the AutoGPT Platfor Dynamic fields allow graphs to connect complex data structures using special delimiters: - _#_ for dictionary keys (e.g., "values_#_name" → values["name"]) -- _$_ for list indices (e.g., "items_$_0" → items[0]) +- _$_ for list indices (e.g., "items_$_0" → items[0]) - _@_ for object attributes (e.g., "obj_@_attr" → obj.attr) This module provides utilities for: diff --git a/autogpt_platform/backend/backend/util/file_test.py b/autogpt_platform/backend/backend/util/file_test.py index 8be3e283a8..87c53e4305 100644 --- a/autogpt_platform/backend/backend/util/file_test.py +++ b/autogpt_platform/backend/backend/util/file_test.py @@ -33,11 +33,14 @@ class TestFileCloudIntegration: cloud_path = "gcs://test-bucket/uploads/456/source.txt" cloud_content = b"cloud file content" - with ( - patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter, - patch("backend.util.file.scan_content_safe") as mock_scan, - patch("backend.util.file.Path") as mock_path_class, - ): + with patch( + "backend.util.file.get_cloud_storage_handler" + ) as mock_handler_getter, patch( + "backend.util.file.scan_content_safe" + ) as mock_scan, patch( + "backend.util.file.Path" + ) as mock_path_class: + # Mock cloud storage handler mock_handler = MagicMock() mock_handler.is_cloud_path.return_value = True @@ -107,13 +110,18 @@ class TestFileCloudIntegration: cloud_path = "gcs://test-bucket/uploads/456/image.png" cloud_content = b"\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR" # PNG header - with ( - patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter, - patch("backend.util.file.scan_content_safe") as mock_scan, - patch("backend.util.file.get_mime_type") as mock_mime, - patch("backend.util.file.base64.b64encode") as mock_b64, - patch("backend.util.file.Path") as mock_path_class, - ): + with patch( + "backend.util.file.get_cloud_storage_handler" + ) as mock_handler_getter, patch( + "backend.util.file.scan_content_safe" + ) as mock_scan, patch( + "backend.util.file.get_mime_type" + ) as mock_mime, patch( + "backend.util.file.base64.b64encode" + ) as mock_b64, patch( + "backend.util.file.Path" + ) as mock_path_class: + # Mock cloud storage handler mock_handler = MagicMock() mock_handler.is_cloud_path.return_value = True @@ -161,13 +169,18 @@ class TestFileCloudIntegration: graph_exec_id = "test-exec-123" data_uri = "data:text/plain;base64,SGVsbG8gd29ybGQ=" - with ( - patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter, - patch("backend.util.file.scan_content_safe") as mock_scan, - patch("backend.util.file.base64.b64decode") as mock_b64decode, - patch("backend.util.file.uuid.uuid4") as mock_uuid, - patch("backend.util.file.Path") as mock_path_class, - ): + with patch( + "backend.util.file.get_cloud_storage_handler" + ) as mock_handler_getter, patch( + "backend.util.file.scan_content_safe" + ) as mock_scan, patch( + "backend.util.file.base64.b64decode" + ) as mock_b64decode, patch( + 
"backend.util.file.uuid.uuid4" + ) as mock_uuid, patch( + "backend.util.file.Path" + ) as mock_path_class: + # Mock cloud storage handler mock_handler = MagicMock() mock_handler.is_cloud_path.return_value = False @@ -217,6 +230,7 @@ class TestFileCloudIntegration: with patch( "backend.util.file.get_cloud_storage_handler" ) as mock_handler_getter: + # Mock cloud storage handler to raise error mock_handler = AsyncMock() mock_handler.is_cloud_path.return_value = True @@ -241,11 +255,14 @@ class TestFileCloudIntegration: local_file = "test_video.mp4" file_content = b"fake video content" - with ( - patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter, - patch("backend.util.file.scan_content_safe") as mock_scan, - patch("backend.util.file.Path") as mock_path_class, - ): + with patch( + "backend.util.file.get_cloud_storage_handler" + ) as mock_handler_getter, patch( + "backend.util.file.scan_content_safe" + ) as mock_scan, patch( + "backend.util.file.Path" + ) as mock_path_class: + # Mock cloud storage handler - not a cloud path mock_handler = MagicMock() mock_handler.is_cloud_path.return_value = False @@ -290,11 +307,14 @@ class TestFileCloudIntegration: local_file = "infected.exe" file_content = b"malicious content" - with ( - patch("backend.util.file.get_cloud_storage_handler") as mock_handler_getter, - patch("backend.util.file.scan_content_safe") as mock_scan, - patch("backend.util.file.Path") as mock_path_class, - ): + with patch( + "backend.util.file.get_cloud_storage_handler" + ) as mock_handler_getter, patch( + "backend.util.file.scan_content_safe" + ) as mock_scan, patch( + "backend.util.file.Path" + ) as mock_path_class: + # Mock cloud storage handler - not a cloud path mock_handler = MagicMock() mock_handler.is_cloud_path.return_value = False diff --git a/autogpt_platform/backend/backend/util/request.py b/autogpt_platform/backend/backend/util/request.py index 3ad96a08a0..95e5ee32f7 100644 --- a/autogpt_platform/backend/backend/util/request.py +++ b/autogpt_platform/backend/backend/util/request.py @@ -500,6 +500,7 @@ class Requests: json=json, **kwargs, ) as response: + if self.raise_for_status: try: response.raise_for_status() diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 09d9532062..00b938c170 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -558,6 +558,7 @@ def get_service_client( self._connection_failure_count >= 3 and current_time - self._last_client_reset > 30 ): + logger.warning( f"Connection failures detected ({self._connection_failure_count}), recreating HTTP clients" ) diff --git a/autogpt_platform/backend/backend/util/service_test.py b/autogpt_platform/backend/backend/util/service_test.py index 25783d1c03..faa0dd6c84 100644 --- a/autogpt_platform/backend/backend/util/service_test.py +++ b/autogpt_platform/backend/backend/util/service_test.py @@ -154,6 +154,7 @@ class TestDynamicClientConnectionHealing: self._connection_failure_count >= 3 and current_time - self._last_client_reset > 30 ): + # Clear cached clients to force recreation on next access if hasattr(self, "sync_client"): delattr(self, "sync_client") @@ -203,12 +204,12 @@ class TestDynamicClientConnectionHealing: sync_after = self.client.sync_client async_after = self.client.async_client - assert sync_before is sync_after, ( - "Sync client should not be reset before threshold" - ) - assert async_before is async_after, ( - "Async client should not be reset 
before threshold" - ) + assert ( + sync_before is sync_after + ), "Sync client should not be reset before threshold" + assert ( + async_before is async_after + ), "Async client should not be reset before threshold" assert self.client._connection_failure_count == 2 def test_no_reset_within_time_window(self): @@ -228,12 +229,12 @@ class TestDynamicClientConnectionHealing: sync_after = self.client.sync_client async_after = self.client.async_client - assert sync_before is sync_after, ( - "Sync client should not be reset within time window" - ) - assert async_before is async_after, ( - "Async client should not be reset within time window" - ) + assert ( + sync_before is sync_after + ), "Sync client should not be reset within time window" + assert ( + async_before is async_after + ), "Async client should not be reset within time window" assert self.client._connection_failure_count == 3 def test_reset_after_threshold_and_time(self): @@ -253,15 +254,15 @@ class TestDynamicClientConnectionHealing: sync_after = self.client.sync_client async_after = self.client.async_client - assert sync_before is not sync_after, ( - "Sync client should be reset after threshold" - ) - assert async_before is not async_after, ( - "Async client should be reset after threshold" - ) - assert self.client._connection_failure_count == 0, ( - "Failure count should be reset" - ) + assert ( + sync_before is not sync_after + ), "Sync client should be reset after threshold" + assert ( + async_before is not async_after + ), "Async client should be reset after threshold" + assert ( + self.client._connection_failure_count == 0 + ), "Failure count should be reset" def test_reset_counters_after_healing(self): """Test that counters are properly reset after healing""" @@ -313,9 +314,9 @@ class TestConnectionHealingIntegration: time_condition = current_time - last_reset_time > 30 should_trigger_reset = failure_count >= 3 and time_condition - assert should_trigger_reset == should_reset, ( - f"Time window logic failed for {current_time - last_reset_time} seconds ago" - ) + assert ( + should_trigger_reset == should_reset + ), f"Time window logic failed for {current_time - last_reset_time} seconds ago" def test_cached_property_behavior(): diff --git a/autogpt_platform/backend/backend/util/test_json.py b/autogpt_platform/backend/backend/util/test_json.py index e93bba233d..2e30dafec6 100644 --- a/autogpt_platform/backend/backend/util/test_json.py +++ b/autogpt_platform/backend/backend/util/test_json.py @@ -222,9 +222,9 @@ class TestSafeJson: problematic_data = { "null_byte": "data with \x00 null", "bell_char": "data with \x07 bell", - "form_feed": "data with \x0c feed", - "escape_char": "data with \x1b escape", - "delete_char": "data with \x7f delete", + "form_feed": "data with \x0C feed", + "escape_char": "data with \x1B escape", + "delete_char": "data with \x7F delete", } # SafeJson should successfully process data with control characters @@ -235,9 +235,9 @@ class TestSafeJson: result_data = result.data assert "\x00" not in str(result_data) # null byte removed assert "\x07" not in str(result_data) # bell removed - assert "\x0c" not in str(result_data) # form feed removed - assert "\x1b" not in str(result_data) # escape removed - assert "\x7f" not in str(result_data) # delete removed + assert "\x0C" not in str(result_data) # form feed removed + assert "\x1B" not in str(result_data) # escape removed + assert "\x7F" not in str(result_data) # delete removed # Test that safe whitespace characters are preserved safe_data = { @@ -263,7 +263,7 @@ 
class TestSafeJson: def test_web_scraping_content_sanitization(self): """Test sanitization of typical web scraping content with null characters.""" # Simulate web content that might contain null bytes from SearchTheWebBlock - web_content = "Article title\x00Hidden null\x01Start of heading\x08Backspace\x0cForm feed content\x1fUnit separator\x7fDelete char" + web_content = "Article title\x00Hidden null\x01Start of heading\x08Backspace\x0CForm feed content\x1FUnit separator\x7FDelete char" result = SafeJson(web_content) assert isinstance(result, Json) @@ -273,9 +273,9 @@ class TestSafeJson: assert "\x00" not in sanitized_content assert "\x01" not in sanitized_content assert "\x08" not in sanitized_content - assert "\x0c" not in sanitized_content - assert "\x1f" not in sanitized_content - assert "\x7f" not in sanitized_content + assert "\x0C" not in sanitized_content + assert "\x1F" not in sanitized_content + assert "\x7F" not in sanitized_content # Verify the content is still readable assert "Article title" in sanitized_content @@ -391,7 +391,7 @@ class TestSafeJson: mixed_content = { "safe_and_unsafe": "Good text\twith tab\x00NULL BYTE\nand newline\x08BACKSPACE", "file_path_with_null": "C:\\temp\\file\x00.txt", - "json_with_controls": '{"text": "data\x01\x0c\x1f"}', + "json_with_controls": '{"text": "data\x01\x0C\x1F"}', } result = SafeJson(mixed_content) @@ -419,13 +419,13 @@ class TestSafeJson: # Create data with various problematic escape sequences that could cause JSON parsing errors problematic_output_data = { - "web_content": "Article text\x00with null\x01and control\x08chars\x0c\x1f\x7f", + "web_content": "Article text\x00with null\x01and control\x08chars\x0C\x1F\x7F", "file_path": "C:\\Users\\test\\file\x00.txt", - "json_like_string": '{"text": "data\x00\x08\x1f"}', + "json_like_string": '{"text": "data\x00\x08\x1F"}', "escaped_sequences": "Text with \\u0000 and \\u0008 sequences", - "mixed_content": "Normal text\tproperly\nformatted\rwith\x00invalid\x08chars\x1fmixed", + "mixed_content": "Normal text\tproperly\nformatted\rwith\x00invalid\x08chars\x1Fmixed", "large_text": "A" * 35000 - + "\x00\x08\x1f" + + "\x00\x08\x1F" + "B" * 5000, # Large text like in the error } @@ -446,9 +446,9 @@ class TestSafeJson: assert "\x00" not in str(web_content) assert "\x01" not in str(web_content) assert "\x08" not in str(web_content) - assert "\x0c" not in str(web_content) - assert "\x1f" not in str(web_content) - assert "\x7f" not in str(web_content) + assert "\x0C" not in str(web_content) + assert "\x1F" not in str(web_content) + assert "\x7F" not in str(web_content) # Check that legitimate content is preserved assert "Article text" in str(web_content) @@ -467,7 +467,7 @@ class TestSafeJson: assert "B" * 1000 in str(large_text) # B's preserved assert "\x00" not in str(large_text) # Control chars removed assert "\x08" not in str(large_text) - assert "\x1f" not in str(large_text) + assert "\x1F" not in str(large_text) # Most importantly: ensure the result can be JSON-serialized without errors # This would have failed with the old approach @@ -602,7 +602,7 @@ class TestSafeJson: model = SamplePydanticModel( name="Test\x00User", # Has null byte age=30, - metadata={"info": "data\x08with\x0ccontrols"}, + metadata={"info": "data\x08with\x0Ccontrols"}, ) data = {"credential": model} @@ -616,7 +616,7 @@ class TestSafeJson: json_string = json.dumps(result.data) assert "\x00" not in json_string assert "\x08" not in json_string - assert "\x0c" not in json_string + assert "\x0C" not in json_string assert 
"TestUser" in json_string # Name preserved minus null byte def test_deeply_nested_pydantic_models_control_char_sanitization(self): @@ -639,16 +639,16 @@ class TestSafeJson: # Create test data with control characters at every nesting level inner = InnerModel( - deep_string="Deepest\x00Level\x08Control\x0cChars", # Multiple control chars at deepest level + deep_string="Deepest\x00Level\x08Control\x0CChars", # Multiple control chars at deepest level metadata={ - "nested_key": "Nested\x1fValue\x7fDelete" + "nested_key": "Nested\x1FValue\x7FDelete" }, # Control chars in nested dict ) middle = MiddleModel( - middle_string="Middle\x01StartOfHeading\x1fUnitSeparator", + middle_string="Middle\x01StartOfHeading\x1FUnitSeparator", inner=inner, - data="Some\x0bVerticalTab\x0eShiftOut", + data="Some\x0BVerticalTab\x0EShiftOut", ) outer = OuterModel(outer_string="Outer\x00Null\x07Bell", middle=middle) @@ -659,7 +659,7 @@ class TestSafeJson: "nested_model": outer, "list_with_strings": [ "List\x00Item1", - "List\x0cItem2\x1f", + "List\x0CItem2\x1F", {"dict_in_list": "Dict\x08Value"}, ], } @@ -684,10 +684,10 @@ class TestSafeJson: "\x06", "\x07", "\x08", - "\x0b", - "\x0c", - "\x0e", - "\x0f", + "\x0B", + "\x0C", + "\x0E", + "\x0F", "\x10", "\x11", "\x12", @@ -698,19 +698,19 @@ class TestSafeJson: "\x17", "\x18", "\x19", - "\x1a", - "\x1b", - "\x1c", - "\x1d", - "\x1e", - "\x1f", - "\x7f", + "\x1A", + "\x1B", + "\x1C", + "\x1D", + "\x1E", + "\x1F", + "\x7F", ] for char in control_chars: - assert char not in json_string, ( - f"Control character {repr(char)} found in result" - ) + assert ( + char not in json_string + ), f"Control character {repr(char)} found in result" # Verify specific sanitized content is present (control chars removed but text preserved) result_data = cast(dict[str, Any], result.data) diff --git a/autogpt_platform/backend/scripts/test_generate_block_docs.py b/autogpt_platform/backend/scripts/test_generate_block_docs.py index a565cc933d..a6d6d34992 100644 --- a/autogpt_platform/backend/scripts/test_generate_block_docs.py +++ b/autogpt_platform/backend/scripts/test_generate_block_docs.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 """Tests for the block documentation generator.""" - import pytest from scripts.generate_block_docs import ( diff --git a/autogpt_platform/backend/test/agent_generator/test_core_integration.py b/autogpt_platform/backend/test/agent_generator/test_core_integration.py index e40634aa9d..528763e751 100644 --- a/autogpt_platform/backend/test/agent_generator/test_core_integration.py +++ b/autogpt_platform/backend/test/agent_generator/test_core_integration.py @@ -48,12 +48,11 @@ class TestDecomposeGoal: """Test that decompose_goal calls the external service.""" expected_result = {"type": "instructions", "steps": ["Step 1"]} - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "decompose_goal_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "decompose_goal_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = expected_result result = await core.decompose_goal("Build a chatbot") @@ -67,12 +66,11 @@ class TestDecomposeGoal: """Test that decompose_goal passes context to external service.""" expected_result = {"type": "instructions", "steps": ["Step 1"]} - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, 
"decompose_goal_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "decompose_goal_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = expected_result await core.decompose_goal("Build a chatbot", "Use Python") @@ -83,12 +81,11 @@ class TestDecomposeGoal: @pytest.mark.asyncio async def test_returns_none_on_service_failure(self): """Test that decompose_goal returns None when external service fails.""" - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "decompose_goal_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "decompose_goal_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = None result = await core.decompose_goal("Build a chatbot") @@ -104,12 +101,11 @@ class TestGenerateAgent: """Test that generate_agent calls the external service.""" expected_result = {"name": "Test Agent", "nodes": [], "links": []} - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "generate_agent_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = expected_result instructions = {"type": "instructions", "steps": ["Step 1"]} @@ -132,12 +128,11 @@ class TestGenerateAgent: "name": "Test Agent", } - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "generate_agent_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = expected_result.copy() result = await core.generate_agent({"steps": []}) @@ -150,12 +145,11 @@ class TestGenerateAgent: @pytest.mark.asyncio async def test_returns_none_when_external_service_fails(self): """Test that generate_agent returns None when external service fails.""" - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "generate_agent_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = None result = await core.generate_agent({"steps": []}) @@ -171,12 +165,11 @@ class TestGenerateAgentPatch: """Test that generate_agent_patch calls the external service.""" expected_result = {"name": "Updated Agent", "nodes": [], "links": []} - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "generate_agent_patch_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_patch_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = expected_result current_agent = {"nodes": [], "links": []} @@ -195,12 +188,11 @@ class TestGenerateAgentPatch: 
"questions": [{"question": "What type of node?"}], } - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "generate_agent_patch_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_patch_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = expected_result result = await core.generate_agent_patch("Add a node", {"nodes": []}) @@ -210,12 +202,11 @@ class TestGenerateAgentPatch: @pytest.mark.asyncio async def test_returns_none_when_external_service_fails(self): """Test that generate_agent_patch returns None when service fails.""" - with ( - patch.object(core, "is_external_service_configured", return_value=True), - patch.object( - core, "generate_agent_patch_external", new_callable=AsyncMock - ) as mock_external, - ): + with patch.object( + core, "is_external_service_configured", return_value=True + ), patch.object( + core, "generate_agent_patch_external", new_callable=AsyncMock + ) as mock_external: mock_external.return_value = None result = await core.generate_agent_patch("Add a node", {"nodes": []}) diff --git a/autogpt_platform/backend/test/sdk/test_sdk_block_creation.py b/autogpt_platform/backend/test/sdk/test_sdk_block_creation.py index 90c23dbe3d..1f7a253a5a 100644 --- a/autogpt_platform/backend/test/sdk/test_sdk_block_creation.py +++ b/autogpt_platform/backend/test/sdk/test_sdk_block_creation.py @@ -499,13 +499,10 @@ class TestComplexBlockScenarios: async def run(self, input_data: Input, **kwargs) -> BlockOutput: if input_data.value < 0: - yield ( - "error", - { - "type": "ValidationError", - "message": "Value must be non-negative", - }, - ) + yield "error", { + "type": "ValidationError", + "message": "Value must be non-negative", + } yield "result", 0 else: yield "result", input_data.value * 2 @@ -624,15 +621,12 @@ class TestAuthenticationVariants: yield "data", f"OAuth data for {input_data.resource}" yield "scopes_used", credentials.scopes or [] - yield ( - "token_info", - { - "has_token": bool(token), - "has_refresh": credentials.refresh_token is not None, - "provider": credentials.provider, - "expires_at": credentials.access_token_expires_at, - }, - ) + yield "token_info", { + "has_token": bool(token), + "has_refresh": credentials.refresh_token is not None, + "provider": credentials.provider, + "expires_at": credentials.access_token_expires_at, + } # Create test OAuth credentials test_oauth_creds = OAuth2Credentials( diff --git a/autogpt_platform/backend/test/sdk/test_sdk_webhooks.py b/autogpt_platform/backend/test/sdk/test_sdk_webhooks.py index 0e9f42907a..a8c1f8b7e1 100644 --- a/autogpt_platform/backend/test/sdk/test_sdk_webhooks.py +++ b/autogpt_platform/backend/test/sdk/test_sdk_webhooks.py @@ -388,9 +388,8 @@ class TestWebhookManagerIntegration: manager_class = managers.get("integrated_webhooks") yield "status", "configured" - yield ( - "manager_type", - (manager_class.__name__ if manager_class else "none"), + yield "manager_type", ( + manager_class.__name__ if manager_class else "none" ) # Test the block diff --git a/autogpt_platform/backend/test_requeue_integration.py b/autogpt_platform/backend/test_requeue_integration.py index 3a350413f3..da1e00e357 100644 --- a/autogpt_platform/backend/test_requeue_integration.py +++ b/autogpt_platform/backend/test_requeue_integration.py @@ -173,15 +173,15 @@ def test_queue_ordering_behavior(): messages = 
tester.consume_messages(max_messages=3) assert len(messages) == 3, f"Expected 3 messages, got {len(messages)}" - assert messages[0]["graph_exec_id"] == "exec-A", ( - f"First message should be A, got {messages[0]['graph_exec_id']}" - ) - assert messages[1]["graph_exec_id"] == "exec-B", ( - f"Second message should be B, got {messages[1]['graph_exec_id']}" - ) - assert messages[2]["graph_exec_id"] == "exec-C", ( - f"Third message should be C, got {messages[2]['graph_exec_id']}" - ) + assert ( + messages[0]["graph_exec_id"] == "exec-A" + ), f"First message should be A, got {messages[0]['graph_exec_id']}" + assert ( + messages[1]["graph_exec_id"] == "exec-B" + ), f"Second message should be B, got {messages[1]['graph_exec_id']}" + assert ( + messages[2]["graph_exec_id"] == "exec-C" + ), f"Third message should be C, got {messages[2]['graph_exec_id']}" print("✅ FIFO order confirmed: A -> B -> C") @@ -250,9 +250,9 @@ def test_queue_ordering_behavior(): if msg["graph_exec_id"] == "exec-X" ) - assert y_index < republished_x_index, ( - f"Y should come before republished X, but got order: {[m['graph_exec_id'] for m in messages]}" - ) + assert ( + y_index < republished_x_index + ), f"Y should come before republished X, but got order: {[m['graph_exec_id'] for m in messages]}" print("✅ Republishing confirmed: messages go to back of queue") @@ -291,9 +291,9 @@ def test_traditional_requeue_behavior(): assert method_frame is not None, "Should have received message A" consumed_msg = json.loads(body.decode()) - assert consumed_msg["graph_exec_id"] == "exec-A", ( - f"Should have consumed message A, got {consumed_msg['graph_exec_id']}" - ) + assert ( + consumed_msg["graph_exec_id"] == "exec-A" + ), f"Should have consumed message A, got {consumed_msg['graph_exec_id']}" # Traditional requeue: basic_nack with requeue=True (sends to FRONT) channel.basic_nack(delivery_tag=method_frame.delivery_tag, requeue=True) @@ -320,20 +320,20 @@ def test_traditional_requeue_behavior(): # CRITICAL ASSERTION: Traditional requeue should put A at FRONT # Expected order: A (requeued to front), B - assert len(received_messages) == 2, ( - f"Expected 2 messages, got {len(received_messages)}" - ) + assert ( + len(received_messages) == 2 + ), f"Expected 2 messages, got {len(received_messages)}" first_msg = received_messages[0]["graph_exec_id"] second_msg = received_messages[1]["graph_exec_id"] # This is the critical test: requeued message A should come BEFORE B - assert first_msg == "exec-A", ( - f"Traditional requeue should put A at FRONT, but first message was: {first_msg}" - ) - assert second_msg == "exec-B", ( - f"B should come after requeued A, but second message was: {second_msg}" - ) + assert ( + first_msg == "exec-A" + ), f"Traditional requeue should put A at FRONT, but first message was: {first_msg}" + assert ( + second_msg == "exec-B" + ), f"B should come after requeued A, but second message was: {second_msg}" print( "✅ HYPOTHESIS CONFIRMED: Traditional requeue sends messages to FRONT of queue"