diff --git a/agenthub/SWE_agent/agent.py b/agenthub/SWE_agent/agent.py
index f0e0fb2b53..6662558d95 100644
--- a/agenthub/SWE_agent/agent.py
+++ b/agenthub/SWE_agent/agent.py
@@ -38,7 +38,7 @@ class SWEAgent(Agent):
         self.cur_line: int = 0

     def _think_act(self, messages: list[dict]) -> tuple[Action, str]:
-        resp = self.llm.do_completion(
+        resp = self.llm.completion(
             messages=messages,
             temperature=0.05,
         )
diff --git a/agenthub/browsing_agent/browsing_agent.py b/agenthub/browsing_agent/browsing_agent.py
index ff0716bde5..c617bbc6dd 100644
--- a/agenthub/browsing_agent/browsing_agent.py
+++ b/agenthub/browsing_agent/browsing_agent.py
@@ -208,7 +208,7 @@ class BrowsingAgent(Agent):
         prompt = get_prompt(error_prefix, cur_axtree_txt, prev_action_str)
         messages.append({'role': 'user', 'content': prompt})
         logger.info(prompt)
-        response = self.llm.do_completion(
+        response = self.llm.completion(
             messages=messages,
             temperature=0.0,
             stop=[')```', ')\n```'],
diff --git a/agenthub/codeact_agent/codeact_agent.py b/agenthub/codeact_agent/codeact_agent.py
index 37586162e7..fbcccded3e 100644
--- a/agenthub/codeact_agent/codeact_agent.py
+++ b/agenthub/codeact_agent/codeact_agent.py
@@ -221,7 +221,7 @@ class CodeActAgent(Agent):
                 f'\n\nENVIRONMENT REMINDER: You have {state.max_iterations - state.iteration} turns left to complete the task.'
             )

-        response = self.llm.do_completion(
+        response = self.llm.completion(
             messages=messages,
             stop=[
                 '</execute_ipython>',
diff --git a/agenthub/codeact_swe_agent/codeact_swe_agent.py b/agenthub/codeact_swe_agent/codeact_swe_agent.py
index 23a1ec644c..358918d322 100644
--- a/agenthub/codeact_swe_agent/codeact_swe_agent.py
+++ b/agenthub/codeact_swe_agent/codeact_swe_agent.py
@@ -173,7 +173,7 @@ class CodeActSWEAgent(Agent):
                 f'\n\nENVIRONMENT REMINDER: You have {state.max_iterations - state.iteration} turns left to complete the task.'
             )

-        response = self.llm.do_completion(
+        response = self.llm.completion(
             messages=messages,
             stop=[
                 '</execute_ipython>',
diff --git a/agenthub/micro/agent.py b/agenthub/micro/agent.py
index c44dc882ae..866e21d4a0 100644
--- a/agenthub/micro/agent.py
+++ b/agenthub/micro/agent.py
@@ -64,7 +64,7 @@ class MicroAgent(Agent):
             latest_user_message=state.get_current_user_intent(),
         )
         messages = [{'content': prompt, 'role': 'user'}]
-        resp = self.llm.do_completion(messages=messages)
+        resp = self.llm.completion(messages=messages)
         action_resp = resp['choices'][0]['message']['content']
         state.num_of_chars += len(prompt) + len(action_resp)
         action = parse_response(action_resp)
diff --git a/agenthub/monologue_agent/agent.py b/agenthub/monologue_agent/agent.py
index a881e648b4..1919f452b4 100644
--- a/agenthub/monologue_agent/agent.py
+++ b/agenthub/monologue_agent/agent.py
@@ -181,7 +181,7 @@ class MonologueAgent(Agent):
         ]

         # format all as a single message, a monologue
-        resp = self.llm.do_completion(messages=messages)
+        resp = self.llm.completion(messages=messages)

         # keep track of max_chars fallback option
         state.num_of_chars += len(prompt) + len(
diff --git a/agenthub/planner_agent/agent.py b/agenthub/planner_agent/agent.py
index c3f9b8efae..68032b8c0f 100644
--- a/agenthub/planner_agent/agent.py
+++ b/agenthub/planner_agent/agent.py
@@ -47,7 +47,7 @@ class PlannerAgent(Agent):
             return AgentFinishAction()
         prompt = get_prompt(state)
         messages = [{'content': prompt, 'role': 'user'}]
-        resp = self.llm.do_completion(messages=messages)
+        resp = self.llm.completion(messages=messages)
         state.num_of_chars += len(prompt) + len(
             resp['choices'][0]['message']['content']
         )
diff --git a/opendevin/llm/llm.py b/opendevin/llm/llm.py
index 7344cf854d..d2390508fa 100644
--- a/opendevin/llm/llm.py
+++ b/opendevin/llm/llm.py
@@ -189,17 +189,31 @@ class LLM:
             after=attempt_on_error,
         )
         def wrapper(*args, **kwargs):
+            """
+            Wrapper for the litellm completion function. Logs the input and output of the completion function.
+            """
+
+            # some callers might just send the messages directly
             if 'messages' in kwargs:
                 messages = kwargs['messages']
             else:
                 messages = args[1]
+
+            # log the prompt
             debug_message = ''
             for message in messages:
                 debug_message += message_separator + message['content']
             llm_prompt_logger.debug(debug_message)
+
+            # call the completion function
             resp = completion_unwrapped(*args, **kwargs)
+
+            # log the response
             message_back = resp['choices'][0]['message']['content']
             llm_response_logger.debug(message_back)
+
+            # post-process to log costs
+            self._post_completion(resp)
             return resp

         self._completion = wrapper  # type: ignore
@@ -208,20 +222,12 @@ class LLM:
     def completion(self):
         """
         Decorator for the litellm completion function.
-        """
-        return self._completion
-
-    def do_completion(self, *args, **kwargs):
-        """
-        Wrapper for the litellm completion function.

         Check the complete documentation at https://litellm.vercel.app/docs/completion
         """
-        resp = self._completion(*args, **kwargs)
-        self.post_completion(resp)
-        return resp
+        return self._completion

-    def post_completion(self, response: str) -> None:
+    def _post_completion(self, response: str) -> None:
         """
         Post-process the completion response.
         """
diff --git a/opendevin/memory/condenser.py b/opendevin/memory/condenser.py
index 1587283006..b8b1842dc7 100644
--- a/opendevin/memory/condenser.py
+++ b/opendevin/memory/condenser.py
@@ -16,7 +16,7 @@ class MemoryCondenser:

         try:
             messages = [{'content': summarize_prompt, 'role': 'user'}]
-            resp = llm.do_completion(messages=messages)
+            resp = llm.completion(messages=messages)
             summary_response = resp['choices'][0]['message']['content']
             return summary_response
         except Exception as e:
diff --git a/tests/unit/test_micro_agents.py b/tests/unit/test_micro_agents.py
index 5273c080e3..ea47abb48d 100644
--- a/tests/unit/test_micro_agents.py
+++ b/tests/unit/test_micro_agents.py
@@ -35,9 +35,7 @@ def test_coder_agent_with_summary():
     """
     mock_llm = MagicMock()
     content = json.dumps({'action': 'finish', 'args': {}})
-    mock_llm.do_completion.return_value = {
-        'choices': [{'message': {'content': content}}]
-    }
+    mock_llm.completion.return_value = {'choices': [{'message': {'content': content}}]}

     coder_agent = Agent.get_cls('CoderAgent')(llm=mock_llm)
     assert coder_agent is not None
@@ -49,8 +47,8 @@ def test_coder_agent_with_summary():
     state = State(history=history, inputs={'summary': summary})
     coder_agent.step(state)

-    mock_llm.do_completion.assert_called_once()
-    _, kwargs = mock_llm.do_completion.call_args
+    mock_llm.completion.assert_called_once()
+    _, kwargs = mock_llm.completion.call_args
     prompt = kwargs['messages'][0]['content']
     assert task in prompt
     assert "Here's a summary of the codebase, as it relates to this task" in prompt
@@ -64,9 +62,7 @@ def test_coder_agent_without_summary():
     """
     mock_llm = MagicMock()
     content = json.dumps({'action': 'finish', 'args': {}})
-    mock_llm.do_completion.return_value = {
-        'choices': [{'message': {'content': content}}]
-    }
+    mock_llm.completion.return_value = {'choices': [{'message': {'content': content}}]}

     coder_agent = Agent.get_cls('CoderAgent')(llm=mock_llm)
     assert coder_agent is not None
@@ -77,8 +73,8 @@ def test_coder_agent_without_summary():
     state = State(history=history)
     coder_agent.step(state)

-    mock_llm.do_completion.assert_called_once()
-    _, kwargs = mock_llm.do_completion.call_args
+    mock_llm.completion.assert_called_once()
+    _, kwargs = mock_llm.completion.call_args
     prompt = kwargs['messages'][0]['content']
     assert task in prompt
     assert "Here's a summary of the codebase, as it relates to this task" not in prompt