Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-04-08 03:00:28 -04:00.
fix: remove commented code, obvious comments, and move inline import

- Remove useless commented line 'model_max_output already set above'
- Remove self-explanatory comment about max_output_tokens
- Move llm_registry import from line 1667 to top-level in graph.py

Per Nick's review feedback.
This commit is contained in:
@@ -541,9 +541,7 @@ async def llm_call(
    )
    prompt = result.messages

    # Calculate available tokens based on context window and input length
    estimated_input_tokens = estimate_token_count(prompt)
    # model_max_output already set above
    user_max = max_tokens or model_max_output
    available_tokens = max(context_window - estimated_input_tokens, 0)
    max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
@@ -1191,7 +1189,6 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
                or "token limit" in str(e).lower()
            ):
                if input_data.max_tokens is None:
                    # Use resolved model's max_output_tokens (handles fallback correctly)
                    input_data.max_tokens = resolved_max_output or 4096
                input_data.max_tokens = int(input_data.max_tokens * 0.85)
                logger.debug(
|
||||
@@ -28,6 +28,7 @@ from backend.blocks._base import Block, BlockType, EmptySchema
from backend.blocks.agent import AgentExecutorBlock
from backend.blocks.io import AgentInputBlock, AgentOutputBlock
from backend.blocks.llm import LlmModel
from backend.data import llm_registry
from backend.integrations.providers import ProviderName
from backend.util import type as type_utils
from backend.util.exceptions import GraphNotAccessibleError, GraphNotInLibraryError
@@ -1664,8 +1665,6 @@ async def migrate_llm_models(migrate_to: LlmModel):
            llm_model_fields[block.id] = field_name

    # Get all model slugs from the registry (dynamic, not hardcoded enum)
    from backend.data import llm_registry

    enum_values = list(llm_registry.get_all_model_slugs_for_validation())

    # Skip migration if registry is empty (fresh deployment before seeding)
Reference in New Issue
Block a user