fix(backend): apply user-defined condenser_max_size in new v1 conversations (#11862)

This commit is contained in:
Hiep Le
2025-12-03 00:24:25 +07:00
committed by GitHub
parent 6139e39449
commit fd13c91387
5 changed files with 229 additions and 19 deletions

View File

@@ -9,6 +9,7 @@ from typing import AsyncGenerator
import base62
from openhands.app_server.app_conversation.app_conversation_models import (
AgentType,
AppConversationStartTask,
AppConversationStartTaskStatus,
)
@@ -25,7 +26,9 @@ from openhands.app_server.sandbox.sandbox_models import SandboxInfo
from openhands.app_server.user.user_context import UserContext
from openhands.sdk import Agent
from openhands.sdk.context.agent_context import AgentContext
from openhands.sdk.context.condenser import LLMSummarizingCondenser
from openhands.sdk.context.skills import load_user_skills
from openhands.sdk.llm import LLM
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
_logger = logging.getLogger(__name__)
@@ -340,3 +343,39 @@ class AppConversationServiceBase(AppConversationService, ABC):
return
_logger.info('Git pre-commit hook installed successfully')
def _create_condenser(
    self,
    llm: LLM,
    agent_type: AgentType,
    condenser_max_size: int | None,
) -> LLMSummarizingCondenser:
    """Build an LLMSummarizingCondenser tailored to the given agent type.

    Args:
        llm: Base LLM; a copy with an agent-type-specific ``usage_id`` is
            what actually drives condensation.
        agent_type: PLAN or DEFAULT; selects which usage_id the condenser
            LLM is tagged with.
        condenser_max_size: User-configured ``max_size`` override, or None
            to keep the condenser's built-in defaults
            (max_size=120, keep_first=4).

    Returns:
        A configured LLMSummarizingCondenser instance.
    """
    usage_id = (
        'condenser' if agent_type == AgentType.DEFAULT else 'planning_condenser'
    )
    condenser_llm = llm.model_copy(update={'usage_id': usage_id})
    if condenser_max_size is None:
        # No user override: rely on LLMSummarizingCondenser's own defaults.
        return LLMSummarizingCondenser(llm=condenser_llm)
    return LLMSummarizingCondenser(llm=condenser_llm, max_size=condenser_max_size)

View File

@@ -76,12 +76,10 @@ from openhands.sdk.security.confirmation_policy import AlwaysConfirm
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
from openhands.server.types import AppMode
from openhands.tools.preset.default import (
get_default_condenser,
get_default_tools,
)
from openhands.tools.preset.planning import (
format_plan_structure,
get_planning_condenser,
get_planning_tools,
)
@@ -643,6 +641,7 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
agent_type: AgentType,
system_message_suffix: str | None,
mcp_config: dict,
condenser_max_size: int | None,
) -> Agent:
"""Create an agent with appropriate tools and context based on agent type.
@@ -651,10 +650,14 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
agent_type: Type of agent to create (PLAN or DEFAULT)
system_message_suffix: Optional suffix for system messages
mcp_config: MCP configuration dictionary
condenser_max_size: condenser_max_size setting
Returns:
Configured Agent instance with context
"""
# Create condenser with user's settings
condenser = self._create_condenser(llm, agent_type, condenser_max_size)
# Create agent based on type
if agent_type == AgentType.PLAN:
agent = Agent(
@@ -662,9 +665,7 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
tools=get_planning_tools(),
system_prompt_filename='system_prompt_planning.j2',
system_prompt_kwargs={'plan_structure': format_plan_structure()},
condenser=get_planning_condenser(
llm=llm.model_copy(update={'usage_id': 'planning_condenser'})
),
condenser=condenser,
security_analyzer=None,
mcp_config=mcp_config,
)
@@ -673,9 +674,7 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
llm=llm,
tools=get_default_tools(enable_browser=True),
system_prompt_kwargs={'cli_mode': False},
condenser=get_default_condenser(
llm=llm.model_copy(update={'usage_id': 'condenser'})
),
condenser=condenser,
mcp_config=mcp_config,
)
@@ -777,7 +776,7 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
# Create agent with context
agent = self._create_agent_with_context(
llm, agent_type, system_message_suffix, mcp_config
llm, agent_type, system_message_suffix, mcp_config, user.condenser_max_size
)
# Finalize and return the conversation request