fix: Disable prompt caching in default condenser (#7781)

Co-authored-by: openhands <openhands@all-hands.dev>
Co-authored-by: Calvin Smith <calvin@all-hands.dev>
Co-authored-by: Xingyao Wang <xingyao@all-hands.dev>
Calvin Smith authored on 2025-04-11 10:09:23 -06:00, committed by GitHub
parent e2bb69908a
commit 36e092e0ac
4 changed files with 36 additions and 7 deletions


@@ -311,8 +311,14 @@ Capture all relevant information, especially:
     def from_config(
         cls, config: StructuredSummaryCondenserConfig
     ) -> StructuredSummaryCondenser:
+        # This condenser cannot take advantage of prompt caching. If it happens
+        # to be set, we'll pay for the cache writes but never get a chance to
+        # save on a read.
+        llm_config = config.llm_config.model_copy()
+        llm_config.caching_prompt = False
         return StructuredSummaryCondenser(
-            llm=LLM(config=config.llm_config),
+            llm=LLM(config=llm_config),
             max_size=config.max_size,
             keep_first=config.keep_first,
             max_event_length=config.max_event_length,
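
To make the intent concrete, here is a standalone sketch (not OpenHands source) of the copy-then-override pattern the patch uses. The LLMConfig below is a simplified pydantic stand-in; only the caching_prompt field and the model_copy() call mirror the diff above, everything else is illustrative.

# Illustrative sketch, not OpenHands source code.
from pydantic import BaseModel


class LLMConfig(BaseModel):
    model: str = "some-model"  # hypothetical field, for illustration only
    caching_prompt: bool = True


shared = LLMConfig()

# Copy before overriding, so the caller's config object is left untouched;
# mutating `shared` in place would disable caching for every other consumer.
condenser_config = shared.model_copy(update={"caching_prompt": False})

assert shared.caching_prompt is True             # original config unchanged
assert condenser_config.caching_prompt is False  # condenser opts out of caching

The diff does the same thing in two steps (model_copy() followed by attribute assignment); either way, the condenser's LLM never writes prompt-cache entries it would not get a chance to read back.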