Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-11 15:25:16 -05:00)

Compare commits: 4 commits, chore/comb ... seer/fix-c

| Author | SHA1 | Date |
|---|---|---|
| | 23bd9b321f | |
| | f610eb6c0a | |
| | 4b0eda7550 | |
| | c4b9cba81b | |

@@ -220,7 +220,9 @@ class SmartDecisionMakerBlock(Block):
             return missing_input

         conversation_history = data.get("conversation_history", [])
-        pending_tool_calls = get_pending_tool_calls(conversation_history)
+        # Convert to dict to handle ChatCompletionMessage objects
+        conversation_history_dicts = [json.to_dict(p) for p in conversation_history if p]
+        pending_tool_calls = get_pending_tool_calls(conversation_history_dicts)
         last_tool_output = data.get("last_tool_output")

         # Tool call is pending, wait for the tool output to be provided.
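
The fix above relies on the repo's `json.to_dict` helper to flatten an OpenAI `ChatCompletionMessage` into a plain dict before it reaches `get_pending_tool_calls`. A minimal, self-contained sketch of that normalization step, using a stand-in dataclass instead of the real SDK type (all names below are illustrative, not the repo's API):

```python
from dataclasses import asdict, dataclass, is_dataclass
from typing import Any


@dataclass
class FakeChatMessage:
    """Stand-in for the OpenAI SDK's ChatCompletionMessage (illustration only)."""

    role: str
    content: str | None = None
    tool_calls: list[dict] | None = None


def to_dict(msg: Any) -> dict:
    """Best-effort conversion of a message-like object to a plain dict."""
    if isinstance(msg, dict):
        return msg
    if is_dataclass(msg):
        return asdict(msg)
    if hasattr(msg, "model_dump"):  # pydantic v2 models, e.g. OpenAI SDK message types
        return msg.model_dump()
    return vars(msg)


conversation_history = [
    {"role": "user", "content": "What's the weather in Paris?"},
    FakeChatMessage(role="assistant", tool_calls=[{"id": "call_1", "type": "function"}]),
]
conversation_history_dicts = [to_dict(p) for p in conversation_history if p]
assert all(isinstance(p, dict) for p in conversation_history_dicts)
```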

@@ -462,7 +464,7 @@ class SmartDecisionMakerBlock(Block):
         input_data.conversation_history = input_data.conversation_history or []
         prompt = [json.to_dict(p) for p in input_data.conversation_history if p]

-        pending_tool_calls = get_pending_tool_calls(input_data.conversation_history)
+        pending_tool_calls = get_pending_tool_calls(prompt)
         if pending_tool_calls and input_data.last_tool_output is None:
             raise ValueError(f"Tool call requires an output for {pending_tool_calls}")

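
Both hunks funnel the history through dict form because pending-tool-call detection needs key access (`msg.get(...)`) rather than attribute access on SDK objects. A rough sketch of that matching logic, under the assumption that the real `get_pending_tool_calls` behaves roughly like this (it may use different keys or return a different structure):

```python
from collections import Counter


def pending_tool_calls(history: list[dict]) -> Counter:
    """Count tool-call ids that have no matching role="tool" result yet (illustrative)."""
    pending: Counter = Counter()
    for msg in history:
        for call in msg.get("tool_calls") or []:
            pending[call["id"]] += 1
        if msg.get("role") == "tool" and msg.get("tool_call_id"):
            pending[msg["tool_call_id"]] -= 1
    return +pending  # unary + drops ids whose count fell to zero or below


history = [
    {"role": "user", "content": "Look this up for me."},
    {"role": "assistant", "tool_calls": [{"id": "call_1", "type": "function"}]},
]
print(pending_tool_calls(history))  # Counter({'call_1': 1}) -> still waiting on the output
```

The `.get` calls here are exactly what fails on a raw `ChatCompletionMessage`, which is why the conversion happens before the scan.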

@@ -15,11 +15,15 @@ def _tok_len(text: str, enc) -> int:
     return len(enc.encode(str(text)))


-def _msg_tokens(msg: dict, enc) -> int:
+def _msg_tokens(msg: dict | Any, enc) -> int:
     """
     OpenAI counts ≈3 wrapper tokens per chat message, plus 1 if "name"
     is present, plus the tokenised content length.
     """
+    # Handle ChatCompletionMessage objects by converting to dict
+    if not isinstance(msg, dict):
+        msg = json.to_dict(msg)
+
     WRAPPER = 3 + (1 if "name" in msg else 0)
     return WRAPPER + _tok_len(msg.get("content") or "", enc)

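
The docstring's counting rule (about 3 wrapper tokens per chat message, one more when a `name` field is present, plus the encoded content) can be reproduced standalone with `tiktoken`; this is a sketch of the heuristic, not the repo's exact code:

```python
import tiktoken


def msg_tokens(msg: dict, enc: tiktoken.Encoding) -> int:
    """Approximate chat tokens: ~3 wrapper tokens, +1 for "name", plus the content."""
    wrapper = 3 + (1 if "name" in msg else 0)
    content = msg.get("content") or ""
    return wrapper + len(enc.encode(str(content)))


enc = tiktoken.encoding_for_model("gpt-4o")  # resolves to o200k_base on recent tiktoken
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "name": "alice", "content": "Summarise this thread."},
]
# The assistant's reply is additionally primed with ~3 tokens, hence the trailing +3.
print(sum(msg_tokens(m, enc) for m in messages) + 3)
```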

@@ -46,7 +50,7 @@ def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str:


 def compress_prompt(
-    messages: list[dict],
+    messages: list[dict | Any],
     target_tokens: int,
     *,
     model: str = "gpt-4o",

@@ -94,7 +98,11 @@ def compress_prompt(
         list[dict] – A *new* messages list that abides by the rules above.
     """
     enc = encoding_for_model(model)  # best-match tokenizer
-    msgs = deepcopy(messages)  # never mutate caller
+    # Convert any ChatCompletionMessage objects to dicts first
+    messages_as_dicts = [
+        json.to_dict(m) if not isinstance(m, dict) else m for m in messages
+    ]
+    msgs = deepcopy(messages_as_dicts)  # never mutate caller

     def total_tokens() -> int:
         """Current size of *msgs* in tokens."""

@@ -162,7 +170,7 @@ def compress_prompt(


 def estimate_token_count(
-    messages: list[dict],
+    messages: list[dict | Any],
     *,
     model: str = "gpt-4o",
 ) -> int:
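
With the signatures widened to `list[dict | Any]`, a caller no longer has to pre-convert SDK message objects before budgeting or compressing a prompt. A hypothetical call site, assuming an illustrative import path (the diff does not show which module these helpers live in):

```python
# Hypothetical import path -- adjust to wherever compress_prompt actually lives.
from prompt_utils import compress_prompt, estimate_token_count

history = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain the token budget. " * 500},
]

before = estimate_token_count(history, model="gpt-4o")
# compress_prompt returns a *new* list; the caller's history is never mutated.
compressed = compress_prompt(history, target_tokens=2_000, model="gpt-4o")
after = estimate_token_count(compressed, model="gpt-4o")
print(f"{before} tokens -> {after} tokens")
```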