diff --git a/openhands/agenthub/codeact_agent/codeact_agent.py b/openhands/agenthub/codeact_agent/codeact_agent.py index ca34cdb66a..cf319b5b41 100644 --- a/openhands/agenthub/codeact_agent/codeact_agent.py +++ b/openhands/agenthub/codeact_agent/codeact_agent.py @@ -193,7 +193,11 @@ class CodeActAgent(Agent): 'messages': self.llm.format_messages_for_llm(messages), } params['tools'] = check_tools(self.tools, self.llm.config) - params['extra_body'] = {'metadata': state.to_llm_metadata(agent_name=self.name)} + params['extra_body'] = { + 'metadata': state.to_llm_metadata( + model_name=self.llm.config.model, agent_name=self.name + ) + } response = self.llm.completion(**params) logger.debug(f'Response from LLM: {response}') actions = self.response_to_actions(response) diff --git a/openhands/controller/agent_controller.py b/openhands/controller/agent_controller.py index a1ce7dfbf3..1edc609068 100644 --- a/openhands/controller/agent_controller.py +++ b/openhands/controller/agent_controller.py @@ -657,6 +657,7 @@ class AgentController: # Take a snapshot of the current metrics before starting the delegate state = State( session_id=self.id.removesuffix('-delegate'), + user_id=self.user_id, inputs=action.inputs or {}, iteration_flag=self.state.iteration_flag, budget_flag=self.state.budget_flag, diff --git a/openhands/controller/state/state.py b/openhands/controller/state/state.py index ac8f25daba..3bb17d670e 100644 --- a/openhands/controller/state/state.py +++ b/openhands/controller/state/state.py @@ -79,6 +79,7 @@ class State: """ session_id: str = '' + user_id: str | None = None iteration_flag: IterationControlFlag = field( default_factory=lambda: IterationControlFlag( limit_increase_amount=100, current_value=0, max_value=100 @@ -265,16 +266,19 @@ class State: return event return None - def to_llm_metadata(self, agent_name: str) -> dict: - return { + def to_llm_metadata(self, model_name: str, agent_name: str) -> dict: + metadata = { 'session_id': self.session_id, 
'trace_version': openhands.__version__, + 'trace_user_id': self.user_id, 'tags': [ + f'model:{model_name}', f'agent:{agent_name}', f'web_host:{os.environ.get("WEB_HOST", "unspecified")}', f'openhands_version:{openhands.__version__}', ], } + return metadata def get_local_step(self): if not self.parent_iteration: diff --git a/openhands/controller/state/state_tracker.py b/openhands/controller/state/state_tracker.py index c16d688480..9b27eb90e5 100644 --- a/openhands/controller/state/state_tracker.py +++ b/openhands/controller/state/state_tracker.py @@ -73,6 +73,7 @@ class StateTracker: if state is None: self.state = State( session_id=id.removesuffix('-delegate'), + user_id=self.user_id, inputs={}, iteration_flag=IterationControlFlag( limit_increase_amount=max_iterations, diff --git a/openhands/memory/condenser/condenser.py b/openhands/memory/condenser/condenser.py index 43c2573d2a..6e52780b0b 100644 --- a/openhands/memory/condenser/condenser.py +++ b/openhands/memory/condenser/condenser.py @@ -8,6 +8,7 @@ from pydantic import BaseModel from openhands.controller.state.state import State from openhands.core.config.condenser_config import CondenserConfig +from openhands.core.logger import openhands_logger as logger from openhands.events.action.agent import CondensationAction from openhands.memory.view import View @@ -101,10 +102,29 @@ class Condenser(ABC): def condensed_history(self, state: State) -> View | Condensation: """Condense the state's history.""" - self._llm_metadata = state.to_llm_metadata('condenser') + if hasattr(self, 'llm'): + model_name = self.llm.config.model + else: + model_name = 'unknown' + + self._llm_metadata = state.to_llm_metadata( + model_name=model_name, agent_name='condenser' + ) with self.metadata_batch(state): return self.condense(state.view) + @property + def llm_metadata(self) -> dict[str, Any]: + """Metadata to be passed to the LLM when using this condenser. 
+ + This metadata is attached to condensation LLM calls (via ``extra_body``) so that traces can be associated with the originating session, user, model, and agent. + """ + if not self._llm_metadata: + logger.warning( + 'LLM metadata is empty. Ensure to set it in the condenser implementation.' + ) + return self._llm_metadata + @classmethod def register_config(cls, configuration_type: type[CondenserConfig]) -> None: """Register a new condenser configuration type. diff --git a/openhands/memory/condenser/impl/llm_summarizing_condenser.py b/openhands/memory/condenser/impl/llm_summarizing_condenser.py index 46e9c29897..8ea73a25b2 100644 --- a/openhands/memory/condenser/impl/llm_summarizing_condenser.py +++ b/openhands/memory/condenser/impl/llm_summarizing_condenser.py @@ -133,7 +133,7 @@ CURRENT_STATE: Last flip: Heads, Haiku count: 15/20""" response = self.llm.completion( messages=self.llm.format_messages_for_llm(messages), - extra_body={'metadata': self._llm_metadata}, + extra_body={'metadata': self.llm_metadata}, ) summary = response.choices[0].message.content