diff --git a/autogpt_platform/backend/backend/blocks/claude_code.py b/autogpt_platform/backend/backend/blocks/claude_code.py
index d8d7b536dd..2474d25f06 100644
--- a/autogpt_platform/backend/backend/blocks/claude_code.py
+++ b/autogpt_platform/backend/backend/blocks/claude_code.py
@@ -20,7 +20,10 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.sandbox_files import SandboxFileOutput, extract_and_store_sandbox_files
+from backend.util.sandbox_files import (
+    SandboxFileOutput,
+    extract_and_store_sandbox_files,
+)
 
 if TYPE_CHECKING:
     from backend.executor.utils import ExecutionContext
diff --git a/autogpt_platform/backend/backend/util/sandbox_files.py b/autogpt_platform/backend/backend/util/sandbox_files.py
index 2c45805519..cf6ffe744a 100644
--- a/autogpt_platform/backend/backend/util/sandbox_files.py
+++ b/autogpt_platform/backend/backend/util/sandbox_files.py
@@ -13,6 +13,7 @@ from typing import TYPE_CHECKING
 from pydantic import BaseModel
 
 from backend.util.file import store_media_file
+from backend.util.type import MediaFileType
 
 if TYPE_CHECKING:
     from e2b import AsyncSandbox as BaseAsyncSandbox
@@ -160,6 +161,8 @@ async def extract_sandbox_files(
             content = await sandbox.files.read(file_path, format="bytes")
             if isinstance(content, str):
                 content = content.encode("utf-8")
+            elif isinstance(content, bytearray):
+                content = bytes(content)
 
             # Extract filename from path
             file_name = file_path.split("/")[-1]
@@ -229,7 +232,7 @@ async def store_sandbox_files(
         )
 
         result = await store_media_file(
-            file=data_uri,
+            file=MediaFileType(data_uri),
             execution_context=execution_context,
             return_format="for_block_output",
         )
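
For context, a minimal standalone sketch of the byte-normalization the extract_sandbox_files() hunk adds. The helper name normalize_to_bytes is hypothetical and for illustration only; it assumes sandbox.files.read(..., format="bytes") may return str, bytes, or bytearray depending on the SDK, and that downstream code expects plain bytes.

def normalize_to_bytes(content: str | bytes | bytearray) -> bytes:
    """Coerce sandbox file content into plain, immutable bytes."""
    if isinstance(content, str):
        # Text reads come back as str; encode to UTF-8 bytes.
        return content.encode("utf-8")
    if isinstance(content, bytearray):
        # A mutable bytearray is copied into an immutable bytes value.
        return bytes(content)
    return content

The MediaFileType(data_uri) change in store_sandbox_files() looks like a typing-level wrap, presumably to match the annotated parameter of store_media_file without changing the runtime value of the data URI.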