feat(backend/copilot): parse @@agptfile bare refs by file extension

When the entire tool argument is a bare @@agptfile: reference, the
resolved content is now parsed based on the file's extension or MIME
type instead of content-sniffing.

Supported formats: JSON, JSONL, CSV, TSV, YAML, TOML, Parquet, Excel.
Unrecognised formats or parse failures fall back to plain string.
This commit is contained in:
Zamil Majdy
2026-03-13 02:58:00 +07:00
parent a083493aa2
commit 35f54549b5
6 changed files with 682 additions and 12 deletions

View File

@@ -52,10 +52,17 @@ Examples:
You can embed a reference inside any string argument, or use it as the entire
value. Multiple references in one argument are all expanded.
**Type coercion**: The platform automatically coerces expanded string values
to match the block's expected input types. For example, if a block expects
`list[list[str]]` and you pass a string containing a JSON array (e.g. from
an @@agptfile: expansion), the string will be parsed into the correct type.
**Structured data**: When the **entire** argument value is a single file
reference (no surrounding text), the platform automatically parses the file
content based on its extension or MIME type. Supported formats: JSON, JSONL,
CSV, TSV, YAML, TOML, Parquet, and Excel (.xlsx). For example, pass
`@@agptfile:workspace://<id>` where the file is a `.csv` and the rows will be
parsed into `list[list[str]]` automatically. If the format is unrecognised or
parsing fails, the content is returned as a plain string.
**Type coercion**: The platform also coerces expanded values to match the
block's expected input types. For example, if a block expects `list[list[str]]`
and the expanded value is a JSON string, it will be parsed into the correct type.
### Sub-agent tasks

View File

@@ -259,16 +259,64 @@ async def expand_file_refs_in_args(
String values are expanded in-place. Nested dicts and lists are
traversed. Non-string scalars are returned unchanged.
**Bare references** (the entire argument value is a single
``@@agptfile:...`` token with no surrounding text) are resolved and then
parsed according to the file's extension or MIME type. See
:mod:`backend.util.file_content_parser` for the full list of supported
formats (JSON, JSONL, CSV, TSV, YAML, TOML, Parquet, Excel).
If the format is unrecognised or parsing fails, the content is returned as
a plain string (the fallback).
**Embedded references** (``@@agptfile:`` mixed with other text) always
produce a plain string — structured parsing only applies to bare refs.
Raises :class:`FileRefExpansionError` if any reference fails to resolve,
so the tool is *not* executed with an error string as its input. The
caller (the MCP tool wrapper) should convert this into an MCP error
response that lets the model correct the reference before retrying.
"""
from backend.util.file_content_parser import (
BINARY_FORMATS,
infer_format,
parse_file_content,
)
if not args:
return args
async def _expand(value: Any) -> Any:
if isinstance(value, str):
# Check for a bare file reference first — enables structured parsing.
ref = parse_file_ref(value)
if ref is not None:
fmt = infer_format(ref.uri)
try:
if fmt is not None and fmt in BINARY_FORMATS:
# Binary formats need raw bytes, not UTF-8 text.
raw = await read_file_bytes(ref.uri, user_id, session)
content: str | bytes = (
_apply_line_range(
raw.decode("utf-8", errors="replace"),
ref.start_line,
ref.end_line,
)
if ref.start_line or ref.end_line
else raw
)
else:
content = await resolve_file_ref(ref, user_id, session)
except ValueError as exc:
raise FileRefExpansionError(str(exc)) from exc
if fmt is not None:
return parse_file_content(content, fmt)
return (
content
if isinstance(content, str)
else content.decode("utf-8", errors="replace")
)
# Not a bare ref — do normal inline expansion.
return await expand_file_refs_in_string(
value, user_id, session, raise_on_error=True
)

View File

@@ -0,0 +1,215 @@
"""Parse file content into structured Python objects based on file format.
Used by the ``@@agptfile:`` expansion system to eagerly parse well-known file
formats into native Python types *before* schema-driven coercion runs. This
lets blocks with ``Any``-typed inputs receive structured data rather than raw
strings, while blocks expecting strings get the value coerced back via
``convert()``.
Supported formats:
- **JSON** (``.json``) — arrays and objects are promoted; scalars stay as strings
- **JSON Lines** (``.jsonl``, ``.ndjson``) — each non-empty line parsed as JSON → list
- **CSV** (``.csv``) — ``csv.reader`` → ``list[list[str]]``
- **TSV** (``.tsv``) — tab-delimited → ``list[list[str]]``
- **YAML** (``.yaml``, ``.yml``) — parsed via PyYAML; containers only
- **TOML** (``.toml``) — parsed via stdlib ``tomllib``
- **Parquet** (``.parquet``) — via pandas/pyarrow → ``list[list[Any]]`` with header row
- **Excel** (``.xlsx``, ``.xls``) — via pandas → ``list[list[Any]]`` with header row
All parsers follow the **fallback contract**: if parsing fails for *any* reason,
the original content is returned unchanged (string for text formats, bytes for
binary formats). Callers should never see an exception from this module.
"""
import csv
import io
import json
import logging
import tomllib
from posixpath import splitext
from typing import Any
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Extension / MIME → format label mapping
# ---------------------------------------------------------------------------
_EXT_TO_FORMAT: dict[str, str] = {
".json": "json",
".jsonl": "jsonl",
".ndjson": "jsonl",
".csv": "csv",
".tsv": "tsv",
".yaml": "yaml",
".yml": "yaml",
".toml": "toml",
".parquet": "parquet",
".xlsx": "xlsx",
".xls": "xlsx",
}
_MIME_TO_FORMAT: dict[str, str] = {
"application/json": "json",
"application/x-ndjson": "jsonl",
"application/jsonl": "jsonl",
"text/csv": "csv",
"text/tab-separated-values": "tsv",
"application/x-yaml": "yaml",
"text/yaml": "yaml",
"application/toml": "toml",
"application/vnd.apache.parquet": "parquet",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
"application/vnd.ms-excel": "xlsx",
}
# Formats that require raw bytes rather than decoded text.
BINARY_FORMATS: frozenset[str] = frozenset({"parquet", "xlsx"})
def infer_format(uri: str) -> str | None:
"""Return a format label based on URI extension or MIME fragment.
Returns ``None`` when the format cannot be determined — the caller should
fall back to returning the content as a plain string.
"""
# 1. Check MIME fragment (workspace://abc123#application/json)
if "#" in uri:
_, fragment = uri.rsplit("#", 1)
fmt = _MIME_TO_FORMAT.get(fragment)
if fmt:
return fmt
# 2. Check file extension from the path portion.
# Strip the fragment first so ".json#mime" doesn't confuse splitext.
path = uri.split("#")[0]
_, ext = splitext(path)
return _EXT_TO_FORMAT.get(ext.lower())
# ---------------------------------------------------------------------------
# Text-based parsers (content: str → Any)
# ---------------------------------------------------------------------------
def _parse_json(content: str) -> Any:
parsed = json.loads(content)
# Only promote containers. Scalar JSON values (strings, numbers,
# booleans, null) stay as the raw string so that e.g. a file containing
# just ``"42"`` doesn't silently become an int.
if isinstance(parsed, (list, dict)):
return parsed
return content
def _parse_jsonl(content: str) -> Any:
lines = [json.loads(line) for line in content.splitlines() if line.strip()]
return lines if lines else content
def _parse_csv(content: str) -> Any:
return _parse_delimited(content, delimiter=",")
def _parse_tsv(content: str) -> Any:
return _parse_delimited(content, delimiter="\t")
def _parse_delimited(content: str, *, delimiter: str) -> Any:
reader = csv.reader(io.StringIO(content), delimiter=delimiter)
rows = [row for row in reader if row]
# Require ≥1 row and ≥2 columns to qualify as tabular data.
if rows and len(rows[0]) >= 2:
return rows
return content
def _parse_yaml(content: str) -> Any:
    """Parse YAML via ``yaml.safe_load``; only containers are promoted."""
    # Deferred import: PyYAML is only needed when a YAML file is actually seen.
    import yaml

    loaded = yaml.safe_load(content)
    if not isinstance(loaded, (list, dict)):
        return content
    return loaded
def _parse_toml(content: str) -> Any:
parsed = tomllib.loads(content)
# tomllib.loads always returns a dict.
return parsed if parsed else content
# Dispatch table: format label → text parser (str → Any).
# Consulted by parse_file_content for every non-binary format.
_TEXT_PARSERS: dict[str, Any] = {
    "json": _parse_json,
    "jsonl": _parse_jsonl,
    "csv": _parse_csv,
    "tsv": _parse_tsv,
    "yaml": _parse_yaml,
    "toml": _parse_toml,
}
# ---------------------------------------------------------------------------
# Binary-based parsers (content: bytes → Any)
# ---------------------------------------------------------------------------
def _parse_parquet(content: bytes) -> Any:
    """Parquet bytes → ``list[list[Any]]`` with the column names as row 0."""
    # Deferred import: pandas (and its parquet engine) only load when needed.
    import pandas as pd

    frame = pd.read_parquet(io.BytesIO(content))
    return [frame.columns.tolist(), *frame.values.tolist()]
def _parse_xlsx(content: bytes) -> Any:
    """Excel bytes → ``list[list[Any]]`` with the column names as row 0."""
    # Deferred import: pandas (and openpyxl underneath) only load when needed.
    import pandas as pd

    frame = pd.read_excel(io.BytesIO(content))
    return [frame.columns.tolist(), *frame.values.tolist()]
# Dispatch table: format label → binary parser (bytes → Any).
# Keys must stay a subset of BINARY_FORMATS (see parse_file_content).
_BINARY_PARSERS: dict[str, Any] = {
    "parquet": _parse_parquet,
    "xlsx": _parse_xlsx,
}
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
def parse_file_content(content: str | bytes, fmt: str) -> Any:
    """Parse *content* according to *fmt* and return a native Python value.

    Returns the original *content* unchanged if:

    - *fmt* is not recognised
    - parsing fails for any reason (malformed content, missing dependency, etc.)

    This function **never raises**.

    NOTE(review): for a *text* format given ``bytes`` input, the value is
    decoded with ``errors="replace"`` before parsing, so the fallback on a
    subsequent parse failure is the decoded string, not the original bytes.
    """
    try:
        if fmt in BINARY_FORMATS:
            binary_parser = _BINARY_PARSERS.get(fmt)
            # No registered parser, or caller gave us text for a binary
            # format — nothing we can do but hand the content back.
            if binary_parser is None or isinstance(content, str):
                return content
            return binary_parser(content)

        text_parser = _TEXT_PARSERS.get(fmt)
        if text_parser is None:
            return content
        if isinstance(content, bytes):
            content = content.decode("utf-8", errors="replace")
        return text_parser(content)
    except Exception:
        logger.debug("Structured parsing failed for format=%s, falling back", fmt)
        return content

View File

@@ -0,0 +1,379 @@
"""Tests for file_content_parser — format inference and structured parsing."""
import io
import json
import pytest
from backend.util.file_content_parser import (
BINARY_FORMATS,
infer_format,
parse_file_content,
)
# ---------------------------------------------------------------------------
# infer_format
# ---------------------------------------------------------------------------
class TestInferFormat:
    """Format inference: extension-based, MIME-based, and precedence."""

    # --- extension-based ---

    def test_json_extension(self):
        fmt = infer_format("/home/user/data.json")
        assert fmt == "json"

    def test_jsonl_extension(self):
        fmt = infer_format("/tmp/events.jsonl")
        assert fmt == "jsonl"

    def test_ndjson_extension(self):
        fmt = infer_format("/tmp/events.ndjson")
        assert fmt == "jsonl"

    def test_csv_extension(self):
        fmt = infer_format("workspace:///reports/sales.csv")
        assert fmt == "csv"

    def test_tsv_extension(self):
        fmt = infer_format("/home/user/data.tsv")
        assert fmt == "tsv"

    def test_yaml_extension(self):
        fmt = infer_format("/home/user/config.yaml")
        assert fmt == "yaml"

    def test_yml_extension(self):
        fmt = infer_format("/home/user/config.yml")
        assert fmt == "yaml"

    def test_toml_extension(self):
        fmt = infer_format("/home/user/config.toml")
        assert fmt == "toml"

    def test_parquet_extension(self):
        fmt = infer_format("/data/table.parquet")
        assert fmt == "parquet"

    def test_xlsx_extension(self):
        fmt = infer_format("/data/spreadsheet.xlsx")
        assert fmt == "xlsx"

    def test_xls_extension(self):
        fmt = infer_format("/data/old_spreadsheet.xls")
        assert fmt == "xlsx"

    def test_case_insensitive(self):
        assert infer_format("/data/FILE.JSON") == "json"
        assert infer_format("/data/FILE.CSV") == "csv"

    def test_unknown_extension(self):
        assert infer_format("/home/user/readme.txt") is None

    def test_no_extension(self):
        assert infer_format("workspace://abc123") is None

    # --- MIME-based ---

    def test_mime_json(self):
        fmt = infer_format("workspace://abc123#application/json")
        assert fmt == "json"

    def test_mime_csv(self):
        fmt = infer_format("workspace://abc123#text/csv")
        assert fmt == "csv"

    def test_mime_tsv(self):
        fmt = infer_format("workspace://abc123#text/tab-separated-values")
        assert fmt == "tsv"

    def test_mime_ndjson(self):
        fmt = infer_format("workspace://abc123#application/x-ndjson")
        assert fmt == "jsonl"

    def test_mime_yaml(self):
        fmt = infer_format("workspace://abc123#application/x-yaml")
        assert fmt == "yaml"

    def test_mime_xlsx(self):
        uri = "workspace://abc123#application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        assert infer_format(uri) == "xlsx"

    def test_mime_parquet(self):
        fmt = infer_format("workspace://abc123#application/vnd.apache.parquet")
        assert fmt == "parquet"

    def test_unknown_mime(self):
        assert infer_format("workspace://abc123#text/plain") is None

    # --- MIME takes precedence over extension ---

    def test_mime_overrides_extension(self):
        # .txt extension but JSON MIME → json
        assert infer_format("workspace:///file.txt#application/json") == "json"
# ---------------------------------------------------------------------------
# parse_file_content — JSON
# ---------------------------------------------------------------------------
class TestParseJson:
    """JSON parsing: containers promoted, scalars left as raw strings."""

    def test_array(self):
        parsed = parse_file_content("[1, 2, 3]", "json")
        assert parsed == [1, 2, 3]

    def test_object(self):
        parsed = parse_file_content('{"key": "value"}', "json")
        assert parsed == {"key": "value"}

    def test_nested(self):
        payload = json.dumps({"rows": [[1, 2], [3, 4]]})
        parsed = parse_file_content(payload, "json")
        assert parsed == {"rows": [[1, 2], [3, 4]]}

    def test_scalar_string_stays_as_string(self):
        parsed = parse_file_content('"hello"', "json")
        assert parsed == '"hello"'  # original content, not parsed

    def test_scalar_number_stays_as_string(self):
        parsed = parse_file_content("42", "json")
        assert parsed == "42"

    def test_scalar_boolean_stays_as_string(self):
        parsed = parse_file_content("true", "json")
        assert parsed == "true"

    def test_null_stays_as_string(self):
        parsed = parse_file_content("null", "json")
        assert parsed == "null"

    def test_invalid_json_fallback(self):
        raw = "not json at all"
        assert parse_file_content(raw, "json") == raw

    def test_empty_string_fallback(self):
        assert parse_file_content("", "json") == ""

    def test_bytes_input_decoded(self):
        parsed = parse_file_content(b"[1, 2, 3]", "json")
        assert parsed == [1, 2, 3]
# ---------------------------------------------------------------------------
# parse_file_content — JSONL
# ---------------------------------------------------------------------------
class TestParseJsonl:
    """JSON Lines parsing: one list element per non-empty line."""

    def test_basic(self):
        raw = '{"a": 1}\n{"a": 2}\n{"a": 3}'
        assert parse_file_content(raw, "jsonl") == [{"a": 1}, {"a": 2}, {"a": 3}]

    def test_blank_lines_skipped(self):
        raw = '{"a": 1}\n\n{"a": 2}\n'
        assert parse_file_content(raw, "jsonl") == [{"a": 1}, {"a": 2}]

    def test_mixed_types(self):
        raw = '1\n"hello"\n[1,2]\n'
        assert parse_file_content(raw, "jsonl") == [1, "hello", [1, 2]]

    def test_empty_file_fallback(self):
        assert parse_file_content("", "jsonl") == ""

    def test_all_blank_lines_fallback(self):
        assert parse_file_content("\n\n\n", "jsonl") == "\n\n\n"

    def test_invalid_line_fallback(self):
        raw = '{"a": 1}\nnot json\n'
        assert parse_file_content(raw, "jsonl") == raw  # fallback
# ---------------------------------------------------------------------------
# parse_file_content — CSV
# ---------------------------------------------------------------------------
class TestParseCsv:
    """CSV parsing: quoting, empty-row skipping, and fallbacks."""

    def test_basic(self):
        raw = "Name,Score\nAlice,90\nBob,85"
        rows = parse_file_content(raw, "csv")
        assert rows == [["Name", "Score"], ["Alice", "90"], ["Bob", "85"]]

    def test_quoted_fields(self):
        raw = 'Name,Bio\nAlice,"Loves, commas"\nBob,Simple'
        rows = parse_file_content(raw, "csv")
        assert rows[1] == ["Alice", "Loves, commas"]

    def test_single_column_fallback(self):
        # Only 1 column — not tabular enough.
        raw = "Name\nAlice\nBob"
        assert parse_file_content(raw, "csv") == raw

    def test_empty_rows_skipped(self):
        raw = "A,B\n\n1,2\n\n3,4"
        rows = parse_file_content(raw, "csv")
        assert rows == [["A", "B"], ["1", "2"], ["3", "4"]]

    def test_empty_file_fallback(self):
        assert parse_file_content("", "csv") == ""
# ---------------------------------------------------------------------------
# parse_file_content — TSV
# ---------------------------------------------------------------------------
class TestParseTsv:
    """TSV parsing: tab-delimited rows with single-column fallback."""

    def test_basic(self):
        raw = "Name\tScore\nAlice\t90\nBob\t85"
        rows = parse_file_content(raw, "tsv")
        assert rows == [["Name", "Score"], ["Alice", "90"], ["Bob", "85"]]

    def test_single_column_fallback(self):
        raw = "Name\nAlice\nBob"
        assert parse_file_content(raw, "tsv") == raw
# ---------------------------------------------------------------------------
# parse_file_content — YAML
# ---------------------------------------------------------------------------
class TestParseYaml:
    """YAML parsing: containers promoted, scalars left alone, never raises."""

    def test_list(self):
        raw = "- apple\n- banana\n- cherry"
        assert parse_file_content(raw, "yaml") == ["apple", "banana", "cherry"]

    def test_dict(self):
        raw = "name: Alice\nage: 30"
        assert parse_file_content(raw, "yaml") == {"name": "Alice", "age": 30}

    def test_nested(self):
        raw = "users:\n - name: Alice\n - name: Bob"
        parsed = parse_file_content(raw, "yaml")
        assert parsed == {"users": [{"name": "Alice"}, {"name": "Bob"}]}

    def test_scalar_stays_as_string(self):
        assert parse_file_content("hello world", "yaml") == "hello world"

    def test_invalid_yaml_fallback(self):
        raw = ":\n :\n invalid: - -"
        outcome = parse_file_content(raw, "yaml")
        # Either parses or falls back — should not raise.
        assert outcome is not None
# ---------------------------------------------------------------------------
# parse_file_content — TOML
# ---------------------------------------------------------------------------
class TestParseToml:
    """TOML parsing: tables, flat keys, and empty/invalid fallbacks."""

    def test_basic(self):
        raw = '[server]\nhost = "localhost"\nport = 8080'
        parsed = parse_file_content(raw, "toml")
        assert parsed == {"server": {"host": "localhost", "port": 8080}}

    def test_flat(self):
        raw = 'name = "test"\ncount = 42'
        parsed = parse_file_content(raw, "toml")
        assert parsed == {"name": "test", "count": 42}

    def test_empty_table_fallback(self):
        assert parse_file_content("", "toml") == ""

    def test_invalid_toml_fallback(self):
        raw = "not = [valid toml"
        assert parse_file_content(raw, "toml") == raw
# ---------------------------------------------------------------------------
# parse_file_content — Parquet (binary)
# ---------------------------------------------------------------------------
class TestParseParquet:
    """Parquet parsing (binary): header row + data rows; bytes-only input."""

    @pytest.fixture
    def parquet_bytes(self) -> bytes:
        import pandas as pd

        frame = pd.DataFrame({"Name": ["Alice", "Bob"], "Score": [90, 85]})
        buffer = io.BytesIO()
        frame.to_parquet(buffer, index=False)
        return buffer.getvalue()

    def test_basic(self, parquet_bytes: bytes):
        rows = parse_file_content(parquet_bytes, "parquet")
        assert rows == [["Name", "Score"], ["Alice", 90], ["Bob", 85]]

    def test_string_input_fallback(self):
        # Parquet is binary — string input can't be parsed.
        assert parse_file_content("not parquet", "parquet") == "not parquet"

    def test_invalid_bytes_fallback(self):
        assert parse_file_content(b"not parquet bytes", "parquet") == b"not parquet bytes"
# ---------------------------------------------------------------------------
# parse_file_content — Excel (binary)
# ---------------------------------------------------------------------------
class TestParseExcel:
    """Excel parsing (binary): header row + data rows; bytes-only input."""

    @pytest.fixture
    def xlsx_bytes(self) -> bytes:
        import pandas as pd

        frame = pd.DataFrame({"Name": ["Alice", "Bob"], "Score": [90, 85]})
        buffer = io.BytesIO()
        frame.to_excel(buffer, index=False)  # type: ignore[arg-type] # BytesIO is a valid target
        return buffer.getvalue()

    def test_basic(self, xlsx_bytes: bytes):
        rows = parse_file_content(xlsx_bytes, "xlsx")
        assert rows == [["Name", "Score"], ["Alice", 90], ["Bob", 85]]

    def test_string_input_fallback(self):
        assert parse_file_content("not xlsx", "xlsx") == "not xlsx"

    def test_invalid_bytes_fallback(self):
        assert parse_file_content(b"not xlsx bytes", "xlsx") == b"not xlsx bytes"
# ---------------------------------------------------------------------------
# parse_file_content — unknown format / fallback
# ---------------------------------------------------------------------------
class TestFallback:
    """Unknown formats pass content through untouched."""

    def test_unknown_format_returns_content(self):
        outcome = parse_file_content("hello world", "xml")
        assert outcome == "hello world"

    def test_none_format_returns_content(self):
        # Shouldn't normally be called with unrecognised format, but must not crash.
        outcome = parse_file_content("hello", "unknown_format")
        assert outcome == "hello"
# ---------------------------------------------------------------------------
# BINARY_FORMATS
# ---------------------------------------------------------------------------
class TestBinaryFormats:
    """BINARY_FORMATS contains exactly the byte-oriented formats."""

    def test_parquet_is_binary(self):
        assert "parquet" in BINARY_FORMATS

    def test_xlsx_is_binary(self):
        assert "xlsx" in BINARY_FORMATS

    def test_text_formats_not_binary(self):
        text_formats = ("json", "jsonl", "csv", "tsv", "yaml", "toml")
        for fmt in text_formats:
            assert fmt not in BINARY_FORMATS

View File

@@ -1360,6 +1360,18 @@ files = [
dnspython = ">=2.0.0"
idna = ">=2.0.0"
[[package]]
name = "et-xmlfile"
version = "2.0.0"
description = "An implementation of lxml.xmlfile for the standard library"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "et_xmlfile-2.0.0-py3-none-any.whl", hash = "sha256:7a91720bc756843502c3b7504c77b8fe44217c85c537d85037f0f536151b2caa"},
{file = "et_xmlfile-2.0.0.tar.gz", hash = "sha256:dab3f4764309081ce75662649be815c4c9081e88f0837825f90fd28317d4da54"},
]
[[package]]
name = "exa-py"
version = "1.16.1"
@@ -4228,6 +4240,21 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<16)"]
voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "openpyxl"
version = "3.1.5"
description = "A Python library to read/write Excel 2010 xlsx/xlsm files"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "openpyxl-3.1.5-py2.py3-none-any.whl", hash = "sha256:5282c12b107bffeef825f4617dc029afaf41d0ea60823bbb665ef3079dc79de2"},
{file = "openpyxl-3.1.5.tar.gz", hash = "sha256:cf0e3cf56142039133628b5acffe8ef0c12bc902d2aadd3e0fe5878dc08d1050"},
]
[package.dependencies]
et-xmlfile = "*"
[[package]]
name = "opentelemetry-api"
version = "1.39.1"
@@ -6262,13 +6289,6 @@ optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"},
{file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"},
{file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"},
{file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"},
{file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"},
{file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"},
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"},
@@ -8882,4 +8902,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "618d61b0586ab82fec1e28d1feb549a198e0b5c9d152e808862e55efc00a65b9"
content-hash = "a80b9fd658af0ddb733b2919b6591381b994311dda8f910480871b22dacf5c2f"

View File

@@ -92,6 +92,7 @@ gravitas-md2gdocs = "^0.1.0"
posthog = "^7.6.0"
fpdf2 = "^2.8.6"
langsmith = "^0.7.7"
openpyxl = "^3.1.5"
[tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.6.1"