mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
fix(backend): address coderabbitai nitpicks in cost tracking files
- token_tracking.py: convert logger.info %s calls to f-strings per style guide
- cost_tracking.py: simplify metadata=meta (was redundantly `meta or None`); move token_tracking imports to module level to remove # noqa: PLC0415 suppressors
- baseline/service.py: remove dead UnboundLocalError from except tuple since response is initialized to None before the try block
This commit is contained in:
@@ -445,7 +445,7 @@ async def _baseline_llm_caller(
|
||||
cost = float(cost_header)
|
||||
if math.isfinite(cost):
|
||||
state.cost_usd = (state.cost_usd or 0.0) + max(0.0, cost)
|
||||
except (ValueError, AttributeError, UnboundLocalError):
|
||||
except (ValueError, AttributeError):
|
||||
pass
|
||||
|
||||
# Always persist partial text so the session history stays consistent,
|
||||
|
||||
@@ -144,23 +144,14 @@ async def persist_and_record_usage(
|
||||
|
||||
if cache_read_tokens or cache_creation_tokens:
|
||||
logger.info(
|
||||
"%s Turn usage: uncached=%d, cache_read=%d, cache_create=%d,"
|
||||
" output=%d, total=%d, cost_usd=%s",
|
||||
log_prefix,
|
||||
prompt_tokens,
|
||||
cache_read_tokens,
|
||||
cache_creation_tokens,
|
||||
completion_tokens,
|
||||
total_tokens,
|
||||
cost_usd,
|
||||
f"{log_prefix} Turn usage: uncached={prompt_tokens}, cache_read={cache_read_tokens},"
|
||||
f" cache_create={cache_creation_tokens}, output={completion_tokens},"
|
||||
f" total={total_tokens}, cost_usd={cost_usd}"
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
"%s Turn usage: prompt=%d, completion=%d, total=%d",
|
||||
log_prefix,
|
||||
prompt_tokens,
|
||||
completion_tokens,
|
||||
total_tokens,
|
||||
f"{log_prefix} Turn usage: prompt={prompt_tokens}, completion={completion_tokens},"
|
||||
f" total={total_tokens}"
|
||||
)
|
||||
|
||||
if user_id:
|
||||
|
||||
@@ -6,6 +6,10 @@ import threading
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from backend.blocks._base import Block, BlockSchema
|
||||
from backend.copilot.token_tracking import _pending_log_tasks as _copilot_tasks
|
||||
from backend.copilot.token_tracking import (
|
||||
_pending_log_tasks_lock as _copilot_tasks_lock,
|
||||
)
|
||||
from backend.data.execution import NodeExecutionEntry
|
||||
from backend.data.model import NodeExecutionStats
|
||||
from backend.data.platform_cost import PlatformCostEntry, usd_to_microdollars
|
||||
@@ -83,13 +87,6 @@ async def drain_pending_cost_logs(timeout: float = 5.0) -> None:
|
||||
timeout,
|
||||
)
|
||||
# Also drain copilot cost log tasks (token_tracking._pending_log_tasks)
|
||||
from backend.copilot.token_tracking import ( # noqa: PLC0415
|
||||
_pending_log_tasks as _copilot_tasks,
|
||||
)
|
||||
from backend.copilot.token_tracking import ( # noqa: PLC0415
|
||||
_pending_log_tasks_lock as _copilot_tasks_lock,
|
||||
)
|
||||
|
||||
with _copilot_tasks_lock:
|
||||
copilot_pending = [t for t in _copilot_tasks if t.get_loop() is current_loop]
|
||||
if copilot_pending:
|
||||
@@ -286,7 +283,7 @@ async def log_system_credential_cost(
|
||||
model=model_name,
|
||||
tracking_type=tracking_type,
|
||||
tracking_amount=tracking_amount,
|
||||
metadata=meta or None,
|
||||
metadata=meta,
|
||||
),
|
||||
)
|
||||
return # One log per execution is enough
|
||||
|
||||
Reference in New Issue
Block a user