From 4b58eac8771c0c8199a64dcc508b76a89b267d01 Mon Sep 17 00:00:00 2001
From: Nicholas Tindle
Date: Wed, 28 Jan 2026 00:01:05 -0600
Subject: [PATCH] fix(backend): prevent race condition in concurrent node execution context

Use model_copy() instead of mutating shared ExecutionContext to prevent
race conditions when multiple nodes execute concurrently. Each node now
gets its own isolated copy with correct node_id and node_exec_id values.

Co-Authored-By: Claude Opus 4.5
---
 autogpt_platform/backend/backend/executor/manager.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py
index d4261b4ad8..8362dae828 100644
--- a/autogpt_platform/backend/backend/executor/manager.py
+++ b/autogpt_platform/backend/backend/executor/manager.py
@@ -236,9 +236,11 @@ async def execute_node(
     input_size = len(input_data_str)
     log_metadata.debug("Executed node with input", input=input_data_str)
 
-    # Update execution_context with node-level info
-    execution_context.node_id = node_id
-    execution_context.node_exec_id = node_exec_id
+    # Create node-specific execution context to avoid race conditions
+    # (multiple nodes can execute concurrently and would otherwise mutate shared state)
+    execution_context = execution_context.model_copy(
+        update={"node_id": node_id, "node_exec_id": node_exec_id}
+    )
 
     # Inject extra execution arguments for the blocks via kwargs
     # Keep individual kwargs for backwards compatibility with existing blocks
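
For reference, a minimal standalone sketch of the pattern this patch applies: copying a shared Pydantic model with model_copy(update=...) so concurrent tasks never mutate the same object. The ExecutionContext fields and execute_node signature below are illustrative assumptions, not the repository's actual definitions.

```python
# Illustrative sketch only: not the repository's ExecutionContext or execute_node.
import asyncio

from pydantic import BaseModel


class ExecutionContext(BaseModel):
    graph_exec_id: str
    node_id: str | None = None
    node_exec_id: str | None = None


async def execute_node(
    shared_ctx: ExecutionContext, node_id: str, node_exec_id: str
) -> str:
    # Each concurrent task works on its own copy; the shared context is never mutated.
    ctx = shared_ctx.model_copy(
        update={"node_id": node_id, "node_exec_id": node_exec_id}
    )
    await asyncio.sleep(0)  # yield control, simulating interleaved concurrent execution
    return f"{ctx.node_id}/{ctx.node_exec_id}"


async def main() -> None:
    shared = ExecutionContext(graph_exec_id="graph-1")
    results = await asyncio.gather(
        execute_node(shared, "node-a", "exec-a"),
        execute_node(shared, "node-b", "exec-b"),
    )
    print(results)         # ['node-a/exec-a', 'node-b/exec-b']
    print(shared.node_id)  # None -- the shared context was never touched


if __name__ == "__main__":
    asyncio.run(main())
```

With the pre-patch approach (assigning node_id/node_exec_id directly on shared_ctx), interleaved tasks could overwrite each other's values before they were read; the copy-per-node approach removes that shared mutable state.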