mirror of
https://github.com/All-Hands-AI/OpenHands.git
synced 2026-01-08 22:38:05 -05:00
fix: Disable prompt caching in default condenser (#7781)
Co-authored-by: openhands <openhands@all-hands.dev>
Co-authored-by: Calvin Smith <calvin@all-hands.dev>
Co-authored-by: Xingyao Wang <xingyao@all-hands.dev>
This commit is contained in:
@@ -311,8 +311,14 @@ Capture all relevant information, especially:
     def from_config(
         cls, config: StructuredSummaryCondenserConfig
     ) -> StructuredSummaryCondenser:
+        # This condenser cannot take advantage of prompt caching. If it happens
+        # to be set, we'll pay for the cache writes but never get a chance to
+        # save on a read.
+        llm_config = config.llm_config.model_copy()
+        llm_config.caching_prompt = False
+
         return StructuredSummaryCondenser(
-            llm=LLM(config=config.llm_config),
+            llm=LLM(config=llm_config),
             max_size=config.max_size,
             keep_first=config.keep_first,
             max_event_length=config.max_event_length,
Reference in New Issue
Block a user