mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
fix(copilot): update rate limit comment with realistic token estimates, remove synthetic IDs from billing metadata
- config.py: Update per-turn token estimate from 10-15K to 25-35K avg (accounting for context growth), adjust daily turn estimate to ~70-100 - helpers.py: Remove synthetic graph/node IDs from UsageTransactionMetadata since they're fake data; keep only real identifiers (node_exec_id, block_id)
This commit is contained in:
@@ -71,8 +71,10 @@ class ChatConfig(BaseSettings):
     )

     # Rate limiting — token-based limits per day and per week.
-    # Each CoPilot turn consumes ~10-15K tokens (system prompt + tool schemas + response),
-    # so 2.5M daily allows ~170-250 turns/day which is reasonable for normal use.
+    # Per-turn token cost varies with context size: ~10-15K for early turns,
+    # ~30-50K mid-session, up to ~100K pre-compaction. Average across a
+    # session with compaction cycles is ~25-35K tokens/turn, so 2.5M daily
+    # allows ~70-100 turns/day.
     # Checked both at the HTTP layer (routes.py pre-turn) and in the SDK service
     # just before compaction, since context can grow fast with heavy workloads.
     #
@@ -147,9 +147,6 @@ async def execute_block(
         user_id=user_id,
         cost=cost,
         metadata=UsageTransactionMetadata(
-            graph_exec_id=synthetic_graph_id,
-            graph_id=synthetic_graph_id,
-            node_id=synthetic_node_id,
             node_exec_id=node_exec_id,
             block_id=block_id,
             block=block.name,
Reference in New Issue
Block a user