Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-02-09 06:15:41 -05:00

style: fix prettier formatting
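
All hunks below are mechanical formatting changes with no behavioral effect: multiline string arguments are hugged against their call parentheses, long import and argument lists are split one item per line with trailing commas, hex escapes are lowercased, and trailing whitespace is stripped. As a quick sanity check that the escape-case rewrites cannot change what the tests assert, a minimal sketch in plain Python (independent of this repo):

    # Hex escapes in Python source are case-insensitive: "\x0C" and "\x0c"
    # denote the same character, so lowercasing them is a pure spelling change.
    assert b"\xFF\xD8\xFF" == b"\xff\xd8\xff"      # JPEG magic bytes from the media tests
    assert "\x0C" == "\x0c" == "\f"                # form feed
    assert "\x1B" == "\x1b" and "\x7F" == "\x7f"   # escape, delete
    print("escape spellings are equivalent")
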
@@ -57,7 +57,7 @@ async def postmark_webhook_handler(
     webhook: Annotated[
         PostmarkWebhook,
         Body(discriminator="RecordType"),
-    ]
+    ],
 ):
     logger.info(f"Received webhook from Postmark: {webhook}")
     match webhook:

@@ -114,26 +114,22 @@ class StoreAgentHandler(ContentHandler):
     async def get_stats(self) -> dict[str, int]:
         """Get statistics about store agent embedding coverage."""
         # Count approved versions
-        approved_result = await query_raw_with_schema(
-            """
+        approved_result = await query_raw_with_schema("""
             SELECT COUNT(*) as count
             FROM {schema_prefix}"StoreListingVersion"
             WHERE "submissionStatus" = 'APPROVED'
             AND "isDeleted" = false
-            """
-        )
+        """)
         total_approved = approved_result[0]["count"] if approved_result else 0

         # Count versions with embeddings
-        embedded_result = await query_raw_with_schema(
-            """
+        embedded_result = await query_raw_with_schema("""
             SELECT COUNT(*) as count
             FROM {schema_prefix}"StoreListingVersion" slv
             JOIN {schema_prefix}"UnifiedContentEmbedding" uce ON slv.id = uce."contentId" AND uce."contentType" = 'STORE_AGENT'::{schema_prefix}"ContentType"
             WHERE slv."submissionStatus" = 'APPROVED'
             AND slv."isDeleted" = false
-            """
-        )
+        """)
         with_embeddings = embedded_result[0]["count"] if embedded_result else 0

         return {

@@ -596,13 +592,11 @@ class DocumentationHandler(ContentHandler):
             return {"total": 0, "with_embeddings": 0, "without_embeddings": 0}

         # Count embeddings in database for DOCUMENTATION type
-        embedded_result = await query_raw_with_schema(
-            """
+        embedded_result = await query_raw_with_schema("""
             SELECT COUNT(*) as count
             FROM {schema_prefix}"UnifiedContentEmbedding"
             WHERE "contentType" = 'DOCUMENTATION'::{schema_prefix}"ContentType"
-            """
-        )
+        """)

         with_embeddings = embedded_result[0]["count"] if embedded_result else 0

@@ -47,7 +47,7 @@ def mock_storage_client(mocker):

 async def test_upload_media_success(mock_settings, mock_storage_client):
     # Create test JPEG data with valid signature
-    test_data = b"\xFF\xD8\xFF" + b"test data"
+    test_data = b"\xff\xd8\xff" + b"test data"

     test_file = fastapi.UploadFile(
         filename="laptop.jpeg",

@@ -85,7 +85,7 @@ async def test_upload_media_missing_credentials(monkeypatch):

     test_file = fastapi.UploadFile(
         filename="laptop.jpeg",
-        file=io.BytesIO(b"\xFF\xD8\xFF" + b"test data"),  # Valid JPEG signature
+        file=io.BytesIO(b"\xff\xd8\xff" + b"test data"),  # Valid JPEG signature
         headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}),
     )

@@ -110,7 +110,7 @@ async def test_upload_media_video_type(mock_settings, mock_storage_client):


 async def test_upload_media_file_too_large(mock_settings, mock_storage_client):
-    large_data = b"\xFF\xD8\xFF" + b"x" * (
+    large_data = b"\xff\xd8\xff" + b"x" * (
         50 * 1024 * 1024 + 1
     )  # 50MB + 1 byte with valid JPEG signature
     test_file = fastapi.UploadFile(

@@ -522,8 +522,8 @@ async def test_api_keys_with_newline_variations(mock_request):
         "valid\r\ntoken",  # Windows newline
         "valid\rtoken",  # Mac newline
         "valid\x85token",  # NEL (Next Line)
-        "valid\x0Btoken",  # Vertical Tab
-        "valid\x0Ctoken",  # Form Feed
+        "valid\x0btoken",  # Vertical Tab
+        "valid\x0ctoken",  # Form Feed
     ]

     for api_key in newline_variations:

@@ -8,7 +8,12 @@ from pytest_snapshot.plugin import Snapshot

 from backend.api.conn_manager import ConnectionManager
 from backend.api.test_helpers import override_config
-from backend.api.ws_api import AppEnvironment, WebsocketServer, WSMessage, WSMethod
+from backend.api.ws_api import (
+    AppEnvironment,
+    WebsocketServer,
+    WSMessage,
+    WSMethod,
+)
 from backend.api.ws_api import app as websocket_app
 from backend.api.ws_api import (
     handle_subscribe,

@@ -44,11 +44,9 @@ class TextEncoderBlock(Block):
             categories={BlockCategory.TEXT},
             input_schema=TextEncoderBlock.Input,
             output_schema=TextEncoderBlock.Output,
-            test_input={
-                "text": """Hello
+            test_input={"text": """Hello
 World!
-This is a "quoted" string."""
-            },
+This is a "quoted" string."""},
             test_output=[
                 (
                     "encoded_text",

@@ -319,7 +319,7 @@ class CostDollars(BaseModel):

 # Helper functions for payload processing
 def process_text_field(
-    text: Union[bool, TextEnabled, TextDisabled, TextAdvanced, None]
+    text: Union[bool, TextEnabled, TextDisabled, TextAdvanced, None],
 ) -> Optional[Union[bool, Dict[str, Any]]]:
     """Process text field for API payload."""
     if text is None:

@@ -400,7 +400,7 @@ def process_contents_settings(contents: Optional[ContentSettings]) -> Dict[str,


 def process_context_field(
-    context: Union[bool, dict, ContextEnabled, ContextDisabled, ContextAdvanced, None]
+    context: Union[bool, dict, ContextEnabled, ContextDisabled, ContextAdvanced, None],
 ) -> Optional[Union[bool, Dict[str, int]]]:
     """Process context field for API payload."""
     if context is None:

@@ -1371,15 +1371,13 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
             else "Please provide a"
         ) + f" valid JSON {outer_output_type} that matches the expected format."

-        return trim_prompt(
-            f"""
+        return trim_prompt(f"""
             |{complaint}
             |
             |{indented_parse_error}
             |
             |{instruction}
-            """
-        )
+        """)

     def get_json_from_response(
         self, response_text: str, *, pure_json_mode: bool, output_tag_start: str

@@ -2016,8 +2014,7 @@ class AIListGeneratorBlock(AIBlockBase):
         for item in parsed_list:
             yield "list_item", item

-    SYSTEM_PROMPT = trim_prompt(
-        """
+    SYSTEM_PROMPT = trim_prompt("""
         |You are a JSON array generator. Your task is to generate a JSON array of string values based on the user's prompt.
         |
         |The 'list' field should contain a JSON array with the generated string values.

@@ -2027,5 +2024,4 @@ class AIListGeneratorBlock(AIBlockBase):
         |• ["string1", "string2", "string3"]
         |
         |Ensure you provide a proper JSON array with only string values in the 'list' field.
-        """
-    )
+    """)

@@ -19,7 +19,7 @@ def extract_result(output: ReplicateOutputs) -> str:
     elif isinstance(output[0], str):
         result = "".join(
             output  # type: ignore we're already not a file output here
-        )  # type:ignore If output is a list and a str, join the elements the first element. Happens if its text
+        )  # type: ignore If output is a list and a str, join the elements the first element. Happens if its text
     elif isinstance(output[0], dict):
         result = str(output[0])
     else:

@@ -45,12 +45,10 @@ class Slant3DBlockBase(Block):
         valid_tags = [filament["colorTag"] for filament in response["filaments"]]

         if color_tag not in valid_tags:
-            raise ValueError(
-                f"""Invalid color profile combination {color_tag}.
+            raise ValueError(f"""Invalid color profile combination {color_tag}.
 Valid colors for {profile.value} are:
 {','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])}
-                """
-            )
+            """)
         return color_tag

     async def _convert_to_color(

@@ -27,13 +27,11 @@ async def check_cron_job(db):
        return False

    # Check if the refresh job exists
-    job_check = await query_raw_with_schema(
-        """
+    job_check = await query_raw_with_schema("""
        SELECT jobname, schedule, command
        FROM cron.job
        WHERE jobname = 'refresh-store-views'
-        """
-    )
+    """)

    if job_check:
        job = job_check[0]

@@ -57,35 +55,29 @@ async def get_materialized_view_counts(db):
    print("-" * 40)

    # Get counts from mv_agent_run_counts
-    agent_runs = await query_raw_with_schema(
-        """
+    agent_runs = await query_raw_with_schema("""
        SELECT COUNT(*) as total_agents,
               SUM(run_count) as total_runs,
               MAX(run_count) as max_runs,
               MIN(run_count) as min_runs
        FROM {schema_prefix}mv_agent_run_counts
-        """
-    )
+    """)

    # Get counts from mv_review_stats
-    review_stats = await query_raw_with_schema(
-        """
+    review_stats = await query_raw_with_schema("""
        SELECT COUNT(*) as total_listings,
               SUM(review_count) as total_reviews,
               AVG(avg_rating) as overall_avg_rating
        FROM {schema_prefix}mv_review_stats
-        """
-    )
+    """)

    # Get sample data from StoreAgent view
-    store_agents = await query_raw_with_schema(
-        """
+    store_agents = await query_raw_with_schema("""
        SELECT COUNT(*) as total_store_agents,
               AVG(runs) as avg_runs,
               AVG(rating) as avg_rating
        FROM {schema_prefix}"StoreAgent"
-        """
-    )
+    """)

    agent_run_data = agent_runs[0] if agent_runs else {}
    review_data = review_stats[0] if review_stats else {}

@@ -1456,8 +1456,7 @@ async def fix_llm_provider_credentials():

    broken_nodes = []
    try:
-        broken_nodes = await query_raw_with_schema(
-            """
+        broken_nodes = await query_raw_with_schema("""
            SELECT graph."userId" user_id,
                   node.id node_id,
                   node."constantInput" node_preset_input

@@ -1466,8 +1465,7 @@ async def fix_llm_provider_credentials():
              ON node."agentGraphId" = graph.id
            WHERE node."constantInput"::jsonb->'credentials'->>'provider' = 'llm'
            ORDER BY graph."userId";
-            """
-        )
+        """)
        logger.info(f"Fixing LLM credential inputs on {len(broken_nodes)} nodes")
    except Exception as e:
        logger.error(f"Error fixing LLM credential inputs: {e}")

@@ -740,7 +740,9 @@ class ExecutionProcessor:
            return

        if exec_meta.status in [ExecutionStatus.QUEUED, ExecutionStatus.INCOMPLETE]:
-            log_metadata.info(f"⚙️ Starting graph execution #{graph_exec.graph_exec_id}")
+            log_metadata.info(
+                f"⚙️ Starting graph execution #{graph_exec.graph_exec_id}"
+            )
            exec_meta.status = ExecutionStatus.RUNNING
            send_execution_update(
                db_client.update_graph_execution_start_time(graph_exec.graph_exec_id)

@@ -468,7 +468,7 @@ class IntegrationCredentialsStore:
        token = secrets.token_urlsafe(32)
        expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)

-        (code_challenge, code_verifier) = self._generate_code_challenge()
+        code_challenge, code_verifier = self._generate_code_challenge()

        state = OAuthState(
            token=token,

@@ -6,7 +6,7 @@ Usage: from backend.sdk import *

 This module provides:
 - All block base classes and types
-- All credential and authentication components 
+- All credential and authentication components
 - All cost tracking components
 - All webhook components
 - All utility functions

@@ -29,7 +29,11 @@ from backend.data.block import (
     BlockWebhookConfig,
 )
 from backend.data.integrations import Webhook, update_webhook
-from backend.data.model import APIKeyCredentials, Credentials, CredentialsField
+from backend.data.model import (
+    APIKeyCredentials,
+    Credentials,
+    CredentialsField,
+)
 from backend.data.model import CredentialsMetaInput as _CredentialsMetaInput
 from backend.data.model import (
     NodeExecutionStats,

@@ -1,7 +1,7 @@
 """
 Integration between SDK provider costs and the execution cost system.

-This module provides the glue between provider-defined base costs and the 
+This module provides the glue between provider-defined base costs and the
 BLOCK_COSTS configuration used by the execution system.
 """

@@ -3,7 +3,7 @@ Utilities for handling dynamic field names and delimiters in the AutoGPT Platfor

 Dynamic fields allow graphs to connect complex data structures using special delimiters:
 - _#_ for dictionary keys (e.g., "values_#_name" → values["name"])
-- _$_ for list indices (e.g., "items_$_0" → items[0]) 
+- _$_ for list indices (e.g., "items_$_0" → items[0])
 - _@_ for object attributes (e.g., "obj_@_attr" → obj.attr)

 This module provides utilities for:

@@ -222,9 +222,9 @@ class TestSafeJson:
        problematic_data = {
            "null_byte": "data with \x00 null",
            "bell_char": "data with \x07 bell",
-            "form_feed": "data with \x0C feed",
-            "escape_char": "data with \x1B escape",
-            "delete_char": "data with \x7F delete",
+            "form_feed": "data with \x0c feed",
+            "escape_char": "data with \x1b escape",
+            "delete_char": "data with \x7f delete",
        }

        # SafeJson should successfully process data with control characters

@@ -235,9 +235,9 @@ class TestSafeJson:
        result_data = result.data
        assert "\x00" not in str(result_data)  # null byte removed
        assert "\x07" not in str(result_data)  # bell removed
-        assert "\x0C" not in str(result_data)  # form feed removed
-        assert "\x1B" not in str(result_data)  # escape removed
-        assert "\x7F" not in str(result_data)  # delete removed
+        assert "\x0c" not in str(result_data)  # form feed removed
+        assert "\x1b" not in str(result_data)  # escape removed
+        assert "\x7f" not in str(result_data)  # delete removed

        # Test that safe whitespace characters are preserved
        safe_data = {

@@ -263,7 +263,7 @@ class TestSafeJson:
    def test_web_scraping_content_sanitization(self):
        """Test sanitization of typical web scraping content with null characters."""
        # Simulate web content that might contain null bytes from SearchTheWebBlock
-        web_content = "Article title\x00Hidden null\x01Start of heading\x08Backspace\x0CForm feed content\x1FUnit separator\x7FDelete char"
+        web_content = "Article title\x00Hidden null\x01Start of heading\x08Backspace\x0cForm feed content\x1fUnit separator\x7fDelete char"

        result = SafeJson(web_content)
        assert isinstance(result, Json)

@@ -273,9 +273,9 @@ class TestSafeJson:
        assert "\x00" not in sanitized_content
        assert "\x01" not in sanitized_content
        assert "\x08" not in sanitized_content
-        assert "\x0C" not in sanitized_content
-        assert "\x1F" not in sanitized_content
-        assert "\x7F" not in sanitized_content
+        assert "\x0c" not in sanitized_content
+        assert "\x1f" not in sanitized_content
+        assert "\x7f" not in sanitized_content

        # Verify the content is still readable
        assert "Article title" in sanitized_content

@@ -391,7 +391,7 @@ class TestSafeJson:
        mixed_content = {
            "safe_and_unsafe": "Good text\twith tab\x00NULL BYTE\nand newline\x08BACKSPACE",
            "file_path_with_null": "C:\\temp\\file\x00.txt",
-            "json_with_controls": '{"text": "data\x01\x0C\x1F"}',
+            "json_with_controls": '{"text": "data\x01\x0c\x1f"}',
        }

        result = SafeJson(mixed_content)

@@ -419,13 +419,13 @@ class TestSafeJson:

        # Create data with various problematic escape sequences that could cause JSON parsing errors
        problematic_output_data = {
-            "web_content": "Article text\x00with null\x01and control\x08chars\x0C\x1F\x7F",
+            "web_content": "Article text\x00with null\x01and control\x08chars\x0c\x1f\x7f",
            "file_path": "C:\\Users\\test\\file\x00.txt",
-            "json_like_string": '{"text": "data\x00\x08\x1F"}',
+            "json_like_string": '{"text": "data\x00\x08\x1f"}',
            "escaped_sequences": "Text with \\u0000 and \\u0008 sequences",
-            "mixed_content": "Normal text\tproperly\nformatted\rwith\x00invalid\x08chars\x1Fmixed",
+            "mixed_content": "Normal text\tproperly\nformatted\rwith\x00invalid\x08chars\x1fmixed",
            "large_text": "A" * 35000
-            + "\x00\x08\x1F"
+            + "\x00\x08\x1f"
            + "B" * 5000,  # Large text like in the error
        }

@@ -446,9 +446,9 @@ class TestSafeJson:
        assert "\x00" not in str(web_content)
        assert "\x01" not in str(web_content)
        assert "\x08" not in str(web_content)
-        assert "\x0C" not in str(web_content)
-        assert "\x1F" not in str(web_content)
-        assert "\x7F" not in str(web_content)
+        assert "\x0c" not in str(web_content)
+        assert "\x1f" not in str(web_content)
+        assert "\x7f" not in str(web_content)

        # Check that legitimate content is preserved
        assert "Article text" in str(web_content)

@@ -467,7 +467,7 @@ class TestSafeJson:
        assert "B" * 1000 in str(large_text)  # B's preserved
        assert "\x00" not in str(large_text)  # Control chars removed
        assert "\x08" not in str(large_text)
-        assert "\x1F" not in str(large_text)
+        assert "\x1f" not in str(large_text)

        # Most importantly: ensure the result can be JSON-serialized without errors
        # This would have failed with the old approach

@@ -602,7 +602,7 @@ class TestSafeJson:
        model = SamplePydanticModel(
            name="Test\x00User",  # Has null byte
            age=30,
-            metadata={"info": "data\x08with\x0Ccontrols"},
+            metadata={"info": "data\x08with\x0ccontrols"},
        )

        data = {"credential": model}

@@ -616,7 +616,7 @@ class TestSafeJson:
        json_string = json.dumps(result.data)
        assert "\x00" not in json_string
        assert "\x08" not in json_string
-        assert "\x0C" not in json_string
+        assert "\x0c" not in json_string
        assert "TestUser" in json_string  # Name preserved minus null byte

    def test_deeply_nested_pydantic_models_control_char_sanitization(self):

@@ -639,16 +639,16 @@ class TestSafeJson:

        # Create test data with control characters at every nesting level
        inner = InnerModel(
-            deep_string="Deepest\x00Level\x08Control\x0CChars",  # Multiple control chars at deepest level
+            deep_string="Deepest\x00Level\x08Control\x0cChars",  # Multiple control chars at deepest level
            metadata={
-                "nested_key": "Nested\x1FValue\x7FDelete"
+                "nested_key": "Nested\x1fValue\x7fDelete"
            },  # Control chars in nested dict
        )

        middle = MiddleModel(
-            middle_string="Middle\x01StartOfHeading\x1FUnitSeparator",
+            middle_string="Middle\x01StartOfHeading\x1fUnitSeparator",
            inner=inner,
-            data="Some\x0BVerticalTab\x0EShiftOut",
+            data="Some\x0bVerticalTab\x0eShiftOut",
        )

        outer = OuterModel(outer_string="Outer\x00Null\x07Bell", middle=middle)

@@ -659,7 +659,7 @@ class TestSafeJson:
            "nested_model": outer,
            "list_with_strings": [
                "List\x00Item1",
-                "List\x0CItem2\x1F",
+                "List\x0cItem2\x1f",
                {"dict_in_list": "Dict\x08Value"},
            ],
        }

@@ -684,10 +684,10 @@ class TestSafeJson:
            "\x06",
            "\x07",
            "\x08",
-            "\x0B",
-            "\x0C",
-            "\x0E",
-            "\x0F",
+            "\x0b",
+            "\x0c",
+            "\x0e",
+            "\x0f",
            "\x10",
            "\x11",
            "\x12",

@@ -698,13 +698,13 @@ class TestSafeJson:
            "\x17",
            "\x18",
            "\x19",
-            "\x1A",
-            "\x1B",
-            "\x1C",
-            "\x1D",
-            "\x1E",
-            "\x1F",
-            "\x7F",
+            "\x1a",
+            "\x1b",
+            "\x1c",
+            "\x1d",
+            "\x1e",
+            "\x1f",
+            "\x7f",
        ]

        for char in control_chars:

@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 """Tests for the block documentation generator."""
+
 import pytest

 from scripts.generate_block_docs import (

@@ -41,7 +41,11 @@ export function ToolCallMessage({
  }, [blocksResponse?.data]);

  const actionPhrase = getToolActionPhrase(toolName);
-  const argumentsText = formatToolArguments(toolName, toolArguments, blocksById);
+  const argumentsText = formatToolArguments(
+    toolName,
+    toolArguments,
+    blocksById,
+  );
  const displayText = `${actionPhrase}${argumentsText}`;
  const IconComponent = getToolIcon(toolName);