(feat) making prompt caching optional instead of enabled by default (#3689)
* (feat) making prompt caching optional instead of enabled by default

  At present, only the Claude models support prompt caching, and only as an experimental feature, so it should be an optional setting rather than being enabled by default.

  Signed-off-by: Yi Lin <teroincn@gmail.com>

* handle the conflict
* fix unittest mock return value
* fix lint error in whitespace

---------

Signed-off-by: Yi Lin <teroincn@gmail.com>
@@ -141,6 +141,9 @@ model = "gpt-4o"
 # Drop any unmapped (unsupported) params without causing an exception
 #drop_params = false
 
+# Using the prompt caching feature provided by the LLM
+#caching_prompt = false
+
 # Base URL for the OLLAMA API
 #ollama_base_url = ""
 
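For reference, a user wanting to opt in would set the flag in their own config.toml rather than the template. A minimal sketch, assuming the option lives in the [llm] section alongside model (as the hunk header above suggests), with an illustrative Claude model name since the commit message notes only Claude models currently support caching:

[llm]
# Illustrative model name; prompt caching is only meaningful for Claude models here.
model = "anthropic/claude-3-5-sonnet-20240620"

# Opt in to the LLM provider's prompt caching (disabled by default after this change)
caching_prompt = true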