feat(llm): convert function call request for non-funcall OSS model (#4711)

Co-authored-by: Calvin Smith <email@cjsmith.io>
Xingyao Wang
2024-11-14 10:40:09 -06:00
committed by GitHub
parent 52a428d74a
commit 07f0d1ccb3
40 changed files with 1752 additions and 1164 deletions

View File

@@ -87,9 +87,7 @@ class Q20Game:
# others
bingo, anwser_reply = self.judge_winner(response)
if bingo:
-return (
-'You are bingo! quit now, run: <execute_bash> exit </execute_bash>.\n'
-)
+return 'You are bingo! Use the "finish" tool to finish the interaction.\n'
if self.curr_turn == self.num_turns - 2:
anwser_reply += " You must guess now, what's it?"
return anwser_reply
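The change above replaces the old bash-exit convention with a dedicated "finish" tool. For reference, such a tool can be declared in litellm's tool-param format; this is only a sketch, and the actual FinishTool in openhands/agenthub/codeact_agent/function_calling.py may word its description differently:

```
# Sketch of a "finish" tool in litellm's tool-param format; the real
# definition in function_calling.py may differ in wording.
from litellm import ChatCompletionToolParam, ChatCompletionToolParamFunctionChunk

FinishTool = ChatCompletionToolParam(
    type='function',
    function=ChatCompletionToolParamFunctionChunk(
        name='finish',
        description='Finish the interaction when the task is complete.',
    ),
)
```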

View File

@@ -40,7 +40,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'When you think you have fixed the issue through code changes, please finish the interaction using the "finish" tool.\n'
}
FILE_EXT_MAP = {

File diff suppressed because one or more lines are too long

View File

@@ -40,7 +40,7 @@ from openhands.utils.async_utils import call_async_from_sync
def codeact_user_response(state: State) -> str:
msg = (
'Please continue working on the task on whatever approach you think is suitable.\n'
-'If you think you have completed the SQL, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'If you think you have completed the SQL, please finish the interaction using the "finish" tool.\n'
'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP OR USE THE INTERNET TO SOLVE THIS TASK.\n'
)
if state.history:
@@ -54,7 +54,7 @@ def codeact_user_response(state: State) -> str:
# let the agent know that it can give up when it has tried 3 times
return (
msg
-+ 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
++ 'If you want to give up, use the "finish" tool to finish the interaction.\n'
)
return msg
@@ -64,7 +64,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'When you think you have fixed the issue through code changes, please finish the interaction using the "finish" tool.\n'
}

View File

@@ -55,7 +55,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'When you think you have fixed the issue through code changes, please finish the interaction using the "finish" tool.\n'
}

View File

@@ -33,7 +33,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': 'When you think you have completed the request, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'When you think you have completed the request, please finish the interaction using the "finish" tool.\n'
}

View File

@@ -87,11 +87,10 @@ def gpqa_codeact_user_response(
msg = (
'Please continue working on the task on whatever approach you think is suitable.\n'
'Feel free to use all tools for calculations and solving the problem, and web-search for finding relevant facts during the process if needed\n'
-'If you have finished reporting the answer in the expected format, (and only once that is done), please run the following command to submit: <execute_bash> exit </execute_bash>.\n'
+'If you have finished reporting the answer in the expected format, (and only once that is done), please use the "finish" tool to finish the interaction.\n'
'Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.\n'
'That is, when you have decided on the answer report in the following format:\n'
f'{ACTION_FORMAT}\n'
-'<execute_bash> exit </execute_bash>\n'
'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP TO SOLVE THIS TASK.\n'
)
return msg
@@ -100,7 +99,7 @@ def gpqa_codeact_user_response(
AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': gpqa_codeact_user_response}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please finish the interaction using the "finish" tool.\n'
}
@@ -205,12 +204,11 @@ Additional Instructions:
- Do not try to solve the question in a single step. Break it down into smaller steps.
- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
-- SUPER IMPORTANT: When you have reported the answer to the user in the requested format, (and only once that is done) in the next turn, please run the following command: <execute_bash> exit </execute_bash>.
+- SUPER IMPORTANT: When you have reported the answer to the user in the requested format, (and only once that is done) in the next turn, please finish the interaction using the "finish" tool.
- Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.
That is, when you have decided on the answer report in the following format:
{ACTION_FORMAT}
-<execute_bash> exit </execute_bash>
Again do not quit without reporting the answer first.
Ok now its time to start solving the question. Good luck!

View File

@@ -23,7 +23,7 @@ For each problem, OpenHands is given a set number of iterations to fix the faili
```
{
"task_id": "Python/2",
"instruction": "Please fix the function in Python__2.py such that all test cases pass.\nEnvironment has been set up for you to start working. You may assume all necessary tools are installed.\n\n# Problem Statement\ndef truncate_number(number: float) -> float:\n return number % 1.0 + 1.0\n\n\n\n\n\n\ndef check(truncate_number):\n assert truncate_number(3.5) == 0.5\n assert abs(truncate_number(1.33) - 0.33) < 1e-6\n assert abs(truncate_number(123.456) - 0.456) < 1e-6\n\ncheck(truncate_number)\n\nIMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\nYou should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\nYou SHOULD INCLUDE PROPER INDENTATION in your edit commands.\nWhen you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n",
"instruction": "Please fix the function in Python__2.py such that all test cases pass.\nEnvironment has been set up for you to start working. You may assume all necessary tools are installed.\n\n# Problem Statement\ndef truncate_number(number: float) -> float:\n return number % 1.0 + 1.0\n\n\n\n\n\n\ndef check(truncate_number):\n assert truncate_number(3.5) == 0.5\n assert abs(truncate_number(1.33) - 0.33) < 1e-6\n assert abs(truncate_number(123.456) - 0.456) < 1e-6\n\ncheck(truncate_number)\n\nIMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\nYou should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\nYou SHOULD INCLUDE PROPER INDENTATION in your edit commands.\nWhen you think you have fixed the issue through code changes, please finish the interaction using the "finish" tool.\n",
"metadata": {
"agent_class": "CodeActAgent",
"model_name": "gpt-4",
@@ -38,10 +38,10 @@ For each problem, OpenHands is given a set number of iterations to fix the faili
"id": 27,
"timestamp": "2024-05-22T20:57:24.688651",
"source": "user",
"message": "Please fix the function in Python__2.py such that all test cases pass.\nEnvironment has been set up for you to start working. You may assume all necessary tools are installed.\n\n# Problem Statement\ndef truncate_number(number: float) -> float:\n return number % 1.0 + 1.0\n\n\n\n\n\n\ndef check(truncate_number):\n assert truncate_number(3.5) == 0.5\n assert abs(truncate_number(1.33) - 0.33) < 1e-6\n assert abs(truncate_number(123.456) - 0.456) < 1e-6\n\ncheck(truncate_number)\n\nIMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\nYou should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\nYou SHOULD INCLUDE PROPER INDENTATION in your edit commands.\nWhen you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n",
"message": "Please fix the function in Python__2.py such that all test cases pass.\nEnvironment has been set up for you to start working. You may assume all necessary tools are installed.\n\n# Problem Statement\ndef truncate_number(number: float) -> float:\n return number % 1.0 + 1.0\n\n\n\n\n\n\ndef check(truncate_number):\n assert truncate_number(3.5) == 0.5\n assert abs(truncate_number(1.33) - 0.33) < 1e-6\n assert abs(truncate_number(123.456) - 0.456) < 1e-6\n\ncheck(truncate_number)\n\nIMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\nYou should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\nYou SHOULD INCLUDE PROPER INDENTATION in your edit commands.\nWhen you think you have fixed the issue through code changes, please finish the interaction using the "finish" tool.\n",
"action": "message",
"args": {
"content": "Please fix the function in Python__2.py such that all test cases pass.\nEnvironment has been set up for you to start working. You may assume all necessary tools are installed.\n\n# Problem Statement\ndef truncate_number(number: float) -> float:\n return number % 1.0 + 1.0\n\n\n\n\n\n\ndef check(truncate_number):\n assert truncate_number(3.5) == 0.5\n assert abs(truncate_number(1.33) - 0.33) < 1e-6\n assert abs(truncate_number(123.456) - 0.456) < 1e-6\n\ncheck(truncate_number)\n\nIMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\nYou should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\nYou SHOULD INCLUDE PROPER INDENTATION in your edit commands.\nWhen you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n",
"content": "Please fix the function in Python__2.py such that all test cases pass.\nEnvironment has been set up for you to start working. You may assume all necessary tools are installed.\n\n# Problem Statement\ndef truncate_number(number: float) -> float:\n return number % 1.0 + 1.0\n\n\n\n\n\n\ndef check(truncate_number):\n assert truncate_number(3.5) == 0.5\n assert abs(truncate_number(1.33) - 0.33) < 1e-6\n assert abs(truncate_number(123.456) - 0.456) < 1e-6\n\ncheck(truncate_number)\n\nIMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\nYou should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\nYou SHOULD INCLUDE PROPER INDENTATION in your edit commands.\nWhen you think you have fixed the issue through code changes, please finish the interaction using the "finish" tool.\n",
"wait_for_response": false
}
},

View File

@@ -75,7 +75,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'When you think you have fixed the issue through code changes, please finish the interaction using the "finish" tool.\n'
}

View File

@@ -70,7 +70,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': '\nIMPORTANT: When your answer is confirmed by the user to be correct, you can exit using the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'IMPORTANT: When your answer is confirmed by the user to be correct, you can use the "finish" tool to finish the interaction.\n'
}
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt'), 'r') as f:

View File

@@ -55,7 +55,7 @@ Here's an example of the evaluation output for a single task instance:
{
"instance_id": 3,
"repo": "https://github.com/dmlc/dgl",
"instruction": "Please complete the Machine Learning task in the following repository: dgl\n\nThe task is: DGL Implementation of NGCF model\n\nI have a deep desire to embark on a journey brimming with knowledge and expertise. My objective is to train a cutting-edge NGCF Model, known for its unparalleled capabilities, on the illustrious dataset known as gowalla. To ensure swift execution, I kindly request your assistance in crafting the code, making use of the powerful GPU #3 and an embedding size of 32. Can you lend a helping hand to transform this dream into a reality?\n\nYou should create a script named `run.sh` under the specified path in the repo to run the task.\n\nYou can find the task repo at: /workspace/dgl/examples/pytorch/NGCF/NGCF\n\nYou should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).When you think you have completed the task, please run the following command: <execute_bash> exit </execute_bash>.\n",
"instruction": "Please complete the Machine Learning task in the following repository: dgl\n\nThe task is: DGL Implementation of NGCF model\n\nI have a deep desire to embark on a journey brimming with knowledge and expertise. My objective is to train a cutting-edge NGCF Model, known for its unparalleled capabilities, on the illustrious dataset known as gowalla. To ensure swift execution, I kindly request your assistance in crafting the code, making use of the powerful GPU #3 and an embedding size of 32. Can you lend a helping hand to transform this dream into a reality?\n\nYou should create a script named `run.sh` under the specified path in the repo to run the task.\n\nYou can find the task repo at: /workspace/dgl/examples/pytorch/NGCF/NGCF\n\nYou should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).When you think you have completed the task, please finish the interaction using the "finish" tool.\n",
"metadata": {
"agent_class": "CodeActAgent",
"model_name": "gpt-4-1106-preview",
@@ -70,10 +70,10 @@ Here's an example of the evaluation output for a single task instance:
"id": 0,
"timestamp": "2024-05-26T17:40:41.060009",
"source": "user",
"message": "Please complete the Machine Learning task in the following repository: dgl\n\nThe task is: DGL Implementation of NGCF model\n\nI have a deep desire to embark on a journey brimming with knowledge and expertise. My objective is to train a cutting-edge NGCF Model, known for its unparalleled capabilities, on the illustrious dataset known as gowalla. To ensure swift execution, I kindly request your assistance in crafting the code, making use of the powerful GPU #3 and an embedding size of 32. Can you lend a helping hand to transform this dream into a reality?\n\nYou should create a script named `run.sh` under the specified path in the repo to run the task.\n\nYou can find the task repo at: /workspace/dgl/examples/pytorch/NGCF/NGCF\n\nYou should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).When you think you have completed the task, please run the following command: <execute_bash> exit </execute_bash>.\n",
"message": "Please complete the Machine Learning task in the following repository: dgl\n\nThe task is: DGL Implementation of NGCF model\n\nI have a deep desire to embark on a journey brimming with knowledge and expertise. My objective is to train a cutting-edge NGCF Model, known for its unparalleled capabilities, on the illustrious dataset known as gowalla. To ensure swift execution, I kindly request your assistance in crafting the code, making use of the powerful GPU #3 and an embedding size of 32. Can you lend a helping hand to transform this dream into a reality?\n\nYou should create a script named `run.sh` under the specified path in the repo to run the task.\n\nYou can find the task repo at: /workspace/dgl/examples/pytorch/NGCF/NGCF\n\nYou should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).When you think you have completed the task, please finish the interaction using the "finish" tool.\n",
"action": "message",
"args": {
"content": "Please complete the Machine Learning task in the following repository: dgl\n\nThe task is: DGL Implementation of NGCF model\n\nI have a deep desire to embark on a journey brimming with knowledge and expertise. My objective is to train a cutting-edge NGCF Model, known for its unparalleled capabilities, on the illustrious dataset known as gowalla. To ensure swift execution, I kindly request your assistance in crafting the code, making use of the powerful GPU #3 and an embedding size of 32. Can you lend a helping hand to transform this dream into a reality?\n\nYou should create a script named `run.sh` under the specified path in the repo to run the task.\n\nYou can find the task repo at: /workspace/dgl/examples/pytorch/NGCF/NGCF\n\nYou should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).When you think you have completed the task, please run the following command: <execute_bash> exit </execute_bash>.\n",
"content": "Please complete the Machine Learning task in the following repository: dgl\n\nThe task is: DGL Implementation of NGCF model\n\nI have a deep desire to embark on a journey brimming with knowledge and expertise. My objective is to train a cutting-edge NGCF Model, known for its unparalleled capabilities, on the illustrious dataset known as gowalla. To ensure swift execution, I kindly request your assistance in crafting the code, making use of the powerful GPU #3 and an embedding size of 32. Can you lend a helping hand to transform this dream into a reality?\n\nYou should create a script named `run.sh` under the specified path in the repo to run the task.\n\nYou can find the task repo at: /workspace/dgl/examples/pytorch/NGCF/NGCF\n\nYou should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).When you think you have completed the task, please finish the interaction using the "finish" tool.\n",
"wait_for_response": false
}
},

View File

@@ -52,7 +52,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': 'When you think you have completed the task, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'When you think you have completed the task, please finish the interaction using the "finish" tool.\n'
}
ID2CONDA = {

View File

@@ -84,7 +84,7 @@ def get_config(instance: pd.Series) -> AppConfig:
timeout=1800,
api_key=os.environ.get('ALLHANDS_API_KEY', None),
remote_runtime_api_url=os.environ.get('SANDBOX_REMOTE_RUNTIME_API_URL'),
-remote_runtime_init_timeout=1800,
+remote_runtime_init_timeout=3600,
),
# do not mount workspace
workspace_base=None,

File diff suppressed because one or more lines are too long

View File

@@ -1,6 +1,6 @@
CODEACT_SWE_PROMPT = """Now, you're going to solve this issue on your own. Your terminal session has started and you're in the repository's root directory. You can use any bash commands or the special interface to help you. Edit all the files you need to and run any checks or tests that you want.
Remember, YOU CAN ONLY ENTER ONE COMMAND AT A TIME. You should always wait for feedback after every command.
-When you're satisfied with all of the changes you've made, you can run the following command: <execute_bash> exit </execute_bash>.
+When you're satisfied with all of the changes you've made, you can use the "finish" tool to finish the interaction.
Note however that you cannot use any interactive session commands (e.g. vim) in this environment, but you can write scripts and run them. E.g. you can write a python script and then run it with `python <script_name>.py`.
NOTE ABOUT THE EDIT COMMAND: Indentation really matters! When editing a file, make sure to insert appropriate indentation before each line!

View File

@@ -145,8 +145,8 @@ def get_config(
platform='linux/amd64',
api_key=os.environ.get('ALLHANDS_API_KEY', None),
remote_runtime_api_url=os.environ.get('SANDBOX_REMOTE_RUNTIME_API_URL'),
-keep_runtime_alive=False,
-remote_runtime_init_timeout=1800,
+keep_remote_runtime_alive=False,
+remote_runtime_init_timeout=3600,
),
# do not mount workspace
workspace_base=None,

View File

@@ -34,7 +34,7 @@ AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
}
AGENT_CLS_TO_INST_SUFFIX = {
-'CodeActAgent': 'When you think you have completed the request, please run the following command: <execute_bash> exit </execute_bash>.\n'
+'CodeActAgent': 'When you think you have completed the request, please finish the interaction using the "finish" tool.\n'
}

View File

@@ -137,7 +137,7 @@ def codeact_user_response(
# let the agent know that it can give up when it has tried 3 times
return (
msg
-+ 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
++ 'If you want to give up, use the "finish" tool to finish the interaction.\n'
)
return msg

View File

@@ -1,304 +0,0 @@
import re
from openhands.controller.action_parser import (
ActionParser,
ResponseParser,
)
from openhands.core.exceptions import LLMMalformedActionError
from openhands.core.logger import openhands_logger as logger
from openhands.events.action import (
Action,
AgentDelegateAction,
AgentFinishAction,
CmdRunAction,
FileEditAction,
IPythonRunCellAction,
MessageAction,
)
class CodeActResponseParser(ResponseParser):
"""Parser action:
- CmdRunAction(command) - bash command to run
- FileEditAction(path, content) - edit a file
- IPythonRunCellAction(code) - IPython code to run
- AgentDelegateAction(agent, inputs) - delegate action for (sub)task
- MessageAction(content) - Message action to run (e.g. ask for clarification)
- AgentFinishAction() - end the interaction
"""
def __init__(self):
# Need to pay attention to the item order in self.action_parsers
super().__init__()
self.action_parsers = [
CodeActActionParserFinish(),
CodeActActionParserFileEdit(),
CodeActActionParserCmdRun(),
CodeActActionParserIPythonRunCell(),
CodeActActionParserAgentDelegate(),
]
self.default_parser = CodeActActionParserMessage()
def parse(self, response) -> Action:
action_str = self.parse_response(response)
return self.parse_action(action_str)
def parse_response(self, response) -> str:
action = response.choices[0].message.content
if action is None:
return ''
for lang in ['bash', 'ipython', 'browse']:
# special handling for DeepSeek: it has a stop-word bug and returns </execute_ipython instead of </execute_ipython>
if f'</execute_{lang}' in action and f'</execute_{lang}>' not in action:
action = action.replace(f'</execute_{lang}', f'</execute_{lang}>')
if f'<execute_{lang}>' in action and f'</execute_{lang}>' not in action:
action += f'</execute_{lang}>'
# same DeepSeek stop-word handling for file edits: it returns </file_edit instead of </file_edit>
if '</file_edit' in action and '</file_edit>' not in action:
action = action.replace('</file_edit', '</file_edit>')
if '<file_edit' in action and '</file_edit>' not in action:
action += '</file_edit>'
return action
def parse_action(self, action_str: str) -> Action:
for action_parser in self.action_parsers:
if action_parser.check_condition(action_str):
return action_parser.parse(action_str)
return self.default_parser.parse(action_str)
def action_to_str(self, action: Action) -> str:
if isinstance(action, CmdRunAction):
return (
f'{action.thought}\n<execute_bash>\n{action.command}\n</execute_bash>'
)
elif isinstance(action, IPythonRunCellAction):
return f'{action.thought}\n<execute_ipython>\n{action.code}\n</execute_ipython>'
elif isinstance(action, AgentDelegateAction):
return f'{action.thought}\n<execute_browse>\n{action.inputs["task"]}\n</execute_browse>'
elif isinstance(action, FileEditAction):
return f'{action.thought}\n<file_edit path={action.path}>\n{action.content}\n</file_edit>'
elif isinstance(action, MessageAction):
return action.content
elif isinstance(action, AgentFinishAction) and action.source == 'agent':
return action.thought
return ''
class CodeActActionParserFinish(ActionParser):
"""Parser action:
- AgentFinishAction() - end the interaction
"""
def __init__(
self,
):
self.finish_command = None
def check_condition(self, action_str: str) -> bool:
self.finish_command = re.search(r'<finish>.*</finish>', action_str, re.DOTALL)
return self.finish_command is not None
def parse(self, action_str: str) -> Action:
assert (
self.finish_command is not None
), 'self.finish_command should not be None when parse is called'
thought = action_str.replace(self.finish_command.group(0), '').strip()
return AgentFinishAction(thought=thought)
class CodeActActionParserCmdRun(ActionParser):
"""Parser action:
- CmdRunAction(command) - bash command to run
- AgentFinishAction() - end the interaction
"""
def __init__(
self,
):
self.bash_command = None
def check_condition(self, action_str: str) -> bool:
self.bash_command = re.search(
r'<execute_bash>(.*?)</execute_bash>', action_str, re.DOTALL
)
return self.bash_command is not None
def parse(self, action_str: str) -> Action:
assert (
self.bash_command is not None
), 'self.bash_command should not be None when parse is called'
thought = action_str.replace(self.bash_command.group(0), '').strip()
# a command was found
command_group = self.bash_command.group(1).strip()
if command_group.strip() == 'exit':
return AgentFinishAction(thought=thought)
return CmdRunAction(command=command_group, thought=thought)
class CodeActActionParserIPythonRunCell(ActionParser):
"""Parser action:
- IPythonRunCellAction(code) - IPython code to run
"""
def __init__(
self,
):
self.python_code = None
self.jupyter_kernel_init_code: str = 'from agentskills import *'
def check_condition(self, action_str: str) -> bool:
self.python_code = re.search(
r'<execute_ipython>(.*?)</execute_ipython>', action_str, re.DOTALL
)
return self.python_code is not None
def parse(self, action_str: str) -> Action:
assert (
self.python_code is not None
), 'self.python_code should not be None when parse is called'
code_group = self.python_code.group(1).strip()
thought = action_str.replace(self.python_code.group(0), '').strip()
return IPythonRunCellAction(
code=code_group,
thought=thought,
kernel_init_code=self.jupyter_kernel_init_code,
)
class CodeActActionParserAgentDelegate(ActionParser):
"""Parser action:
- AgentDelegateAction(agent, inputs) - delegate action for (sub)task
"""
def __init__(
self,
):
self.agent_delegate = None
def check_condition(self, action_str: str) -> bool:
self.agent_delegate = re.search(
r'<execute_browse>(.*)</execute_browse>', action_str, re.DOTALL
)
return self.agent_delegate is not None
def parse(self, action_str: str) -> Action:
assert (
self.agent_delegate is not None
), 'self.agent_delegate should not be None when parse is called'
thought = action_str.replace(self.agent_delegate.group(0), '').strip()
browse_actions = self.agent_delegate.group(1).strip()
thought = (
f'{thought}\nI should start with: {browse_actions}'
if thought
else f'I should start with: {browse_actions}'
)
return AgentDelegateAction(
agent='BrowsingAgent', thought=thought, inputs={'task': browse_actions}
)
class CodeActActionParserMessage(ActionParser):
"""Parser action:
- MessageAction(content) - Message action to run (e.g. ask for clarification)
"""
def __init__(
self,
):
pass
def check_condition(self, action_str: str) -> bool:
# We assume the LLM is GOOD enough that when it returns pure natural language
# it wants to talk to the user
return True
def parse(self, action_str: str) -> Action:
return MessageAction(content=action_str, wait_for_response=True)
class CodeActActionParserFileEdit(ActionParser):
"""Parser action:
- FileEditAction(path, content) - edit a file
"""
def __init__(self):
self.file_edit_match: re.Match | None = None
def check_condition(self, action_str: str) -> bool:
if '<file_edit' not in action_str:
return False
# Updated regex to make start and end optional
self.file_edit_match = re.search(
r'<file_edit\s+path=(["\']?)(.*?)\1(?:\s+start=(["\']?)(.*?)\3)?(?:\s+end=(["\']?)(.*?)\5)?\s*>(.*?)</file_edit>',
action_str,
re.DOTALL,
)
if self.file_edit_match is None:
logger.error(
f'FileEditAction detected but the format is incorrect. Unable to match for <file_edit> in:\n{"-" * 80}\n{action_str}\n{"-" * 80}'
)
raise LLMMalformedActionError(
'FileEditAction detected but the format is incorrect. Usage:\n'
'<file_edit path="[path]" start=[start_line] end=[end_line]>\n'
'[content_to_edit]\n'
'</file_edit>\n'
)
path = self.file_edit_match.group(2)
start = self.file_edit_match.group(4)
end = self.file_edit_match.group(6)
if not path:
raise LLMMalformedActionError(
'FileEditAction detected but no `path` specified. You should specify the path of the file to edit.'
)
if start:
try:
int(start)
except ValueError:
raise LLMMalformedActionError(
f'FileEditAction detected but `start` is not a valid integer: {start}'
)
if end:
try:
int(end)
except ValueError:
raise LLMMalformedActionError(
f'FileEditAction detected but `end` is not a valid integer: {end}'
)
return True
def parse(self, action_str: str) -> Action:
assert (
self.file_edit_match is not None
), 'self.file_edit_match should not be None when parse is called'
file_path = self.file_edit_match.group(2).strip()
start_line = (
int(self.file_edit_match.group(4))
if self.file_edit_match.group(4)
else None
)
end_line = (
int(self.file_edit_match.group(6))
if self.file_edit_match.group(6)
else None
)
content = self.file_edit_match.group(7)
thought = action_str.replace(self.file_edit_match.group(0), '').strip()
action = FileEditAction(path=file_path, content=content, thought=thought)
if start_line is not None:
action.start = start_line
if end_line is not None:
action.end = end_line
return action
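The removed parser's behavior can be condensed to its dispatch order: parsers are tried in sequence, a `<finish>` tag or a bare `exit` inside `<execute_bash>` ends the episode, and plain text falls through to a user-facing message. A self-contained sketch (file-edit, IPython, and delegate branches omitted):

```
import re

def classify(action_str: str) -> str:
    """Condensed sketch of the removed parser's dispatch order."""
    if re.search(r'<finish>.*</finish>', action_str, re.DOTALL):
        return 'AgentFinishAction'
    m = re.search(r'<execute_bash>(.*?)</execute_bash>', action_str, re.DOTALL)
    if m:
        # 'exit' was overloaded to mean "finish the interaction"
        return 'AgentFinishAction' if m.group(1).strip() == 'exit' else 'CmdRunAction'
    # Default parser: plain natural language is treated as a message to the user.
    return 'MessageAction'

assert classify('<execute_bash> exit </execute_bash>') == 'AgentFinishAction'
assert classify('<execute_bash> ls </execute_bash>') == 'CmdRunAction'
assert classify('Could you clarify the requirements?') == 'MessageAction'
```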

View File

@@ -1,12 +1,10 @@
import json
import os
from collections import deque
from itertools import islice
from litellm import ModelResponse
import openhands.agenthub.codeact_agent.function_calling as codeact_function_calling
from openhands.agenthub.codeact_agent.action_parser import CodeActResponseParser
from openhands.controller.agent import Agent
from openhands.controller.state.state import State
from openhands.core.config import AgentConfig
@@ -70,7 +68,6 @@ class CodeActAgent(Agent):
AgentSkillsRequirement(),
JupyterRequirement(),
]
-obs_prefix = 'OBSERVATION:\n'
def __init__(
self,
@@ -85,36 +82,30 @@ class CodeActAgent(Agent):
super().__init__(llm, config)
self.reset()
-self.function_calling_active = self.config.function_calling
-if self.function_calling_active and not self.llm.is_function_calling_active():
-logger.warning(
-f'Function calling not supported for model {self.llm.config.model}. '
-'Disabling function calling.'
+self.mock_function_calling = False
+if not self.llm.is_function_calling_active():
+logger.info(
+f'Function calling not enabled for model {self.llm.config.model}. '
+'Mocking function calling via prompting.'
)
-self.function_calling_active = False
+self.mock_function_calling = True
-if self.function_calling_active:
-self.tools = codeact_function_calling.get_tools(
-codeact_enable_browsing=self.config.codeact_enable_browsing,
-codeact_enable_jupyter=self.config.codeact_enable_jupyter,
-codeact_enable_llm_editor=self.config.codeact_enable_llm_editor,
-)
-logger.debug(
-f'TOOLS loaded for CodeActAgent: {json.dumps(self.tools, indent=2)}'
-)
-self.prompt_manager = PromptManager(
-microagent_dir=os.path.join(os.path.dirname(__file__), 'micro') if self.config.use_microagents else None,
-prompt_dir=os.path.join(os.path.dirname(__file__), 'prompts', 'tools'),
-disabled_microagents=self.config.disabled_microagents,
-)
-else:
-self.action_parser = CodeActResponseParser()
-self.prompt_manager = PromptManager(
-microagent_dir=os.path.join(os.path.dirname(__file__), 'micro') if self.config.use_microagents else None,
-prompt_dir=os.path.join(os.path.dirname(__file__), 'prompts', 'default'),
-agent_skills_docs=AgentSkillsRequirement.documentation,
-disabled_microagents=self.config.disabled_microagents,
-)
+# Function calling mode
+self.tools = codeact_function_calling.get_tools(
+codeact_enable_browsing=self.config.codeact_enable_browsing,
+codeact_enable_jupyter=self.config.codeact_enable_jupyter,
+codeact_enable_llm_editor=self.config.codeact_enable_llm_editor,
+)
+logger.debug(
+f'TOOLS loaded for CodeActAgent: {json.dumps(self.tools, indent=2)}'
+)
+self.prompt_manager = PromptManager(
+microagent_dir=os.path.join(os.path.dirname(__file__), 'micro')
+if self.config.use_microagents
+else None,
+prompt_dir=os.path.join(os.path.dirname(__file__), 'prompts'),
+disabled_microagents=self.config.disabled_microagents,
+)
self.pending_actions: deque[Action] = deque()
@@ -163,38 +154,25 @@ class CodeActAgent(Agent):
BrowseInteractiveAction,
),
) or (isinstance(action, AgentFinishAction) and action.source == 'agent'):
-if self.function_calling_active:
-tool_metadata = action.tool_call_metadata
-assert tool_metadata is not None, (
-'Tool call metadata should NOT be None when function calling is enabled. Action: '
-+ str(action)
-)
+tool_metadata = action.tool_call_metadata
+assert tool_metadata is not None, (
+'Tool call metadata should NOT be None when function calling is enabled. Action: '
++ str(action)
+)
-llm_response: ModelResponse = tool_metadata.model_response
-assistant_msg = llm_response.choices[0].message
-# Add the LLM message (assistant) that initiated the tool calls
-# (overwrites any previous message with the same response_id)
-pending_tool_call_action_messages[llm_response.id] = Message(
-role=assistant_msg.role,
-# tool call content SHOULD BE a string
-content=[TextContent(text=assistant_msg.content or '')]
-if assistant_msg.content is not None
-else [],
-tool_calls=assistant_msg.tool_calls,
-)
-return []
-else:
-assert not isinstance(action, BrowseInteractiveAction), (
-'BrowseInteractiveAction is not supported in non-function calling mode. Action: '
-+ str(action)
-)
-content = [TextContent(text=self.action_parser.action_to_str(action))]
-return [
-Message(
-role='user' if action.source == 'user' else 'assistant',
-content=content,
-)
-]
+llm_response: ModelResponse = tool_metadata.model_response
+assistant_msg = llm_response.choices[0].message
+# Add the LLM message (assistant) that initiated the tool calls
+# (overwrites any previous message with the same response_id)
+pending_tool_call_action_messages[llm_response.id] = Message(
+role=assistant_msg.role,
+# tool call content SHOULD BE a string
+content=[TextContent(text=assistant_msg.content or '')]
+if assistant_msg.content is not None
+else [],
+tool_calls=assistant_msg.tool_calls,
+)
+return []
elif isinstance(action, MessageAction):
role = 'user' if action.source == 'user' else 'assistant'
content = [TextContent(text=action.content or '')]
@@ -240,15 +218,14 @@ class CodeActAgent(Agent):
"""
message: Message
max_message_chars = self.llm.config.max_message_chars
-obs_prefix = 'OBSERVATION:\n'
if isinstance(obs, CmdOutputObservation):
-text = obs_prefix + truncate_content(
+text = truncate_content(
obs.content + obs.interpreter_details, max_message_chars
)
text += f'\n[Command finished with exit code {obs.exit_code}]'
message = Message(role='user', content=[TextContent(text=text)])
elif isinstance(obs, IPythonRunCellObservation):
-text = obs_prefix + obs.content
+text = obs.content
# replace base64 images with a placeholder
splitted = text.split('\n')
for i, line in enumerate(splitted):
@@ -260,22 +237,22 @@ class CodeActAgent(Agent):
text = truncate_content(text, max_message_chars)
message = Message(role='user', content=[TextContent(text=text)])
elif isinstance(obs, FileEditObservation):
-text = obs_prefix + truncate_content(str(obs), max_message_chars)
+text = truncate_content(str(obs), max_message_chars)
message = Message(role='user', content=[TextContent(text=text)])
elif isinstance(obs, BrowserOutputObservation):
text = obs.get_agent_obs_text()
message = Message(
role='user',
-content=[TextContent(text=obs_prefix + text)],
+content=[TextContent(text=text)],
)
elif isinstance(obs, AgentDelegateObservation):
-text = obs_prefix + truncate_content(
+text = truncate_content(
obs.outputs['content'] if 'content' in obs.outputs else '',
max_message_chars,
)
message = Message(role='user', content=[TextContent(text=text)])
elif isinstance(obs, ErrorObservation):
-text = obs_prefix + truncate_content(obs.content, max_message_chars)
+text = truncate_content(obs.content, max_message_chars)
text += '\n[Error occurred in processing last action]'
message = Message(role='user', content=[TextContent(text=text)])
elif isinstance(obs, UserRejectObservation):
@@ -287,19 +264,18 @@ class CodeActAgent(Agent):
# when the LLM tries to return the next message
raise ValueError(f'Unknown observation type: {type(obs)}')
-if self.function_calling_active:
-# Update the message as tool response properly
-if (tool_call_metadata := obs.tool_call_metadata) is not None:
-tool_call_id_to_message[tool_call_metadata.tool_call_id] = Message(
-role='tool',
-content=message.content,
-tool_call_id=tool_call_metadata.tool_call_id,
-name=tool_call_metadata.function_name,
-)
-# No need to return the observation message
-# because it will be added by get_action_message when all the corresponding
-# tool calls in the SAME request are processed
-return []
+# Update the message as tool response properly
+if (tool_call_metadata := obs.tool_call_metadata) is not None:
+tool_call_id_to_message[tool_call_metadata.tool_call_id] = Message(
+role='tool',
+content=message.content,
+tool_call_id=tool_call_metadata.tool_call_id,
+name=tool_call_metadata.function_name,
+)
+# No need to return the observation message
+# because it will be added by get_action_message when all the corresponding
+# tool calls in the SAME request are processed
+return []
return [message]
@@ -335,25 +311,14 @@ class CodeActAgent(Agent):
params: dict = {
'messages': self.llm.format_messages_for_llm(messages),
}
-if self.function_calling_active:
-params['tools'] = self.tools
-params['parallel_tool_calls'] = False
-else:
-params['stop'] = [
-'</execute_ipython>',
-'</execute_bash>',
-'</execute_browse>',
-'</file_edit>',
-]
+params['tools'] = self.tools
+if self.mock_function_calling:
+params['mock_function_calling'] = True
response = self.llm.completion(**params)
-if self.function_calling_active:
-actions = codeact_function_calling.response_to_actions(response)
-for action in actions:
-self.pending_actions.append(action)
-return self.pending_actions.popleft()
-else:
-return self.action_parser.parse(response)
+actions = codeact_function_calling.response_to_actions(response)
+for action in actions:
+self.pending_actions.append(action)
+return self.pending_actions.popleft()
def _get_messages(self, state: State) -> list[Message]:
"""Constructs the message history for the LLM conversation.
@@ -484,7 +449,4 @@ class CodeActAgent(Agent):
else:
break
-if not self.function_calling_active:
-self.prompt_manager.add_turns_left_reminder(messages, state)
return messages
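The net effect of this file's changes: step() always sends the tool schema and only toggles a mock flag for models without native function calling. A minimal sketch of the resulting flow, with `response_to_actions` and the LLM client taken as given from the diff above:

```
from collections import deque

import openhands.agenthub.codeact_agent.function_calling as codeact_function_calling

def step_sketch(llm, tools, messages, mock_function_calling: bool,
                pending_actions: deque):
    """Simplified sketch of the unified step() flow after this change."""
    params = {
        'messages': llm.format_messages_for_llm(messages),
        'tools': tools,  # always sent, even for non-function-calling models
    }
    if mock_function_calling:
        # llm.completion() is expected to convert the tool schema into an
        # in-context prompt and parse <function=...> replies back into tool
        # calls (see fn_call_converter.py below).
        params['mock_function_calling'] = True
    response = llm.completion(**params)
    for action in codeact_function_calling.response_to_actions(response):
        pending_actions.append(action)
    return pending_actions.popleft()
```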

View File

@@ -53,9 +53,6 @@ _IPYTHON_DESCRIPTION = """Run a cell of Python code in an IPython environment.
* The assistant should define variables and import packages before using them.
* The variable defined in the IPython environment will not be available outside the IPython environment (e.g., in terminal).
"""
-# We are not using agentskills's file_ops for viewing files now because StrReplaceEditorTool already supports viewing files
-# """* Apart from the standard Python library, the assistant can also use the following functions (already imported):
-# {AgentSkillsRequirement.documentation}"""
IPythonTool = ChatCompletionToolParam(
type='function',

View File

@@ -1,174 +0,0 @@
{% set MINIMAL_SYSTEM_PREFIX %}
A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed answers to the user's questions.
[1] The assistant can use a Python environment with <execute_ipython>, e.g.:
<execute_ipython>
print("Hello World!")
</execute_ipython>
[2] The assistant can execute bash commands wrapped with <execute_bash>, e.g. <execute_bash> ls </execute_bash>.
If a bash command returns exit code `-1`, this means the process is not yet finished.
The assistant must then send a second <execute_bash>. The second <execute_bash> can be empty
(which will retrieve any additional logs), or it can contain text to be sent to STDIN of the running process,
or it can contain the text `ctrl+c` to interrupt the process.
For commands that may run indefinitely, the output should be redirected to a file and the command run
in the background, e.g. <execute_bash> python3 app.py > server.log 2>&1 & </execute_bash>
If a command execution result says "Command timed out. Sending SIGINT to the process",
the assistant should retry running the command in the background.
[3] The assistant can edit files using <file_edit> by setting the file path and providing a draft of the new file content. The draft file content does not need to be exactly the same as the existing file content; the assistant may skip some lines and only include the parts that need to be changed.
IMPORTANT: When editing large file (e.g., > 300 lines), the assistant MUST SPECIFY the range of lines to be edited by setting `start` and `end` (1-indexed, both inclusive). For example, `<file_edit path="/path/to/file.txt" start=1 end=-1>` means the assistant will edit the whole file (from line 1 to the end of the file). `start=1` and `end=-1` are the default values, so the assistant can omit them if they are the same as the default values.
BEFORE you start editing, you MUST view the ENTIRE body of the part you want to edit and get the correct begin and end line numbers.
When editing files, the assistant should include comments indicating where the code will not change. For example, use comments like `# no changes before` or `# no changes here` to clearly mark sections of the code that remain unchanged. This helps to provide context and ensure clarity in the edits being made.
Possible cases:
- File too long: When the file to be edited is too long, the assistant should set `start` and `end` (1-indexed, both inclusive) to specify the range of lines to be edited. For example, `<file_edit path="/path/to/file.txt" start=100 end=200>` means the assistant will only edit lines 100 to 200 of `/path/to/file.txt`.
- Append to file: If the assistant wants to append to a file, it should set both `start` and `end` to `-1`.
- File does not exist: If `<file_edit>` is pointing to a file that does not exist, a new file with the exact content will be created.
Important: because line numbers are useful, the assistant should always use the provided functions to search (e.g., `search_dir`) or view the file content (e.g., `open_file`) along with the line numbers. DO NOT use other methods (e.g., `cat`) to view the file content.
**Example 1 (general edit for short files)**
For example, given an existing file `/path/to/file.py` that looks like this:
(this is the end of the file)
1|class MyClass:
2| def __init__(self):
3| self.x = 1
4| self.y = 2
5| self.z = 3
6|
7|print(MyClass().z)
8|print(MyClass().x)
(this is the end of the file)
The assistant wants to edit the file to look like this:
(this is the end of the file)
1|class MyClass:
2| def __init__(self):
3| self.x = 1
4| self.y = 2
5|
6|print(MyClass().y)
(this is the end of the file)
The assistant may produce an edit action like this:
<file_edit path="/path/to/file.txt" start=1 end=-1>
class MyClass:
def __init__(self):
# no changes before
self.y = 2
# self.z is removed
# MyClass().z is removed
print(MyClass().y)
</file_edit>
**Example 2 (append to file for short files)**
For example, given an existing file `/path/to/file.py` that looks like this:
(this is the end of the file)
1|class MyClass:
2| def __init__(self):
3| self.x = 1
4| self.y = 2
5| self.z = 3
6|
7|print(MyClass().z)
8|print(MyClass().x)
(this is the end of the file)
To append the following lines to the file:
```python
print(MyClass().y)
```
The assistant may produce an edit action like this:
<file_edit path="/path/to/file.txt" start=-1 end=-1>
print(MyClass().y)
</file_edit>
**Example 3 (edit for long files)**
Given an existing file `/path/to/file.py` that looks like this:
(1000 more lines above)
1001|class MyClass:
1002| def __init__(self):
1003| self.x = 1
1004| self.y = 2
1005| self.z = 3
1006|
1007|print(MyClass().z)
1008|print(MyClass().x)
(2000 more lines below)
The assistant wants to edit the file to look like this:
(1000 more lines above)
1001|class MyClass:
1002| def __init__(self):
1003| self.x = 1
1004| self.y = 2
1005|
1006|print(MyClass().y)
(2000 more lines below)
The assistant may produce an edit action like this:
<file_edit path="/path/to/file.txt" start=1001 end=1008>
class MyClass:
def __init__(self):
# no changes before
self.y = 2
# self.z is removed
# MyClass().z is removed
print(MyClass().y)
</file_edit>
{% endset %}
{% set BROWSING_PREFIX %}
The assistant can browse the Internet with <execute_browse> and </execute_browse>.
For example, <execute_browse> Tell me the usa's president using google search </execute_browse>.
Or <execute_browse> Tell me what is in http://example.com </execute_browse>.
{% endset %}
{% set PIP_INSTALL_PREFIX %}
The assistant can install Python packages using the %pip magic command in an IPython environment by using the following syntax: <execute_ipython> %pip install [package needed] </execute_ipython> and should always import packages and define variables before starting to use them.
{% endset %}
{% set SYSTEM_PREFIX = MINIMAL_SYSTEM_PREFIX + BROWSING_PREFIX + PIP_INSTALL_PREFIX %}
{% set COMMAND_DOCS %}
Apart from the standard Python library, the assistant can also use the following functions (already imported) in <execute_ipython> environment:
{{ agent_skills_docs }}
IMPORTANT:
- `open_file` only returns the first 100 lines of the file by default! The assistant MUST use `scroll_down` repeatedly to read the full file BEFORE making edits!
- Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.
- Any code issued should be less than 50 lines to avoid context being cut off!
{% endset %}
{% set SYSTEM_SUFFIX %}
Responses should be concise.
The assistant should attempt fewer things at a time instead of putting too many commands OR too much code in one "execute" block.
Include ONLY ONE <execute_ipython>, <execute_bash>, or <execute_browse> per response, unless the assistant is finished with the task or needs more input or action from the user in order to proceed.
If the assistant is finished with the task you MUST include <finish></finish> in your response.
IMPORTANT: Execute code using <execute_ipython>, <execute_bash>, or <execute_browse> whenever possible.
The assistant should utilize full file paths and the `pwd` command to prevent path-related errors.
The assistant MUST NOT apologize to the user or thank the user after running commands or editing files. It should only address the user in response to an explicit message from the user, or to ask for more information.
The assistant MUST NOT push any changes to GitHub unless explicitly requested to do so.
The assistant MUST NOT include comments in the code unless they are necessary to describe non-obvious behavior, or
to describe precisely how to apply proposed edits. Comments about applying edits should always have blank lines above
and below.
{% endset %}
{# Combine all parts without newlines between them #}
{{ SYSTEM_PREFIX -}}
{{- COMMAND_DOCS -}}
{{- SYSTEM_SUFFIX }}

View File

@@ -1,219 +0,0 @@
{% set DEFAULT_EXAMPLE %}
--- START OF EXAMPLE ---
USER: Create a list of numbers from 1 to 10, and display them in a web page at port 5000.
ASSISTANT:
Sure! Let me first check the current directory:
<execute_bash>
ls
</execute_bash>
USER:
OBSERVATION:
openhands@runtime:~/workspace$
ASSISTANT:
There is no `app.py` file in the current directory. Let me create a Python file `app.py`:
<file_edit path="/workspace/app.py" start=-1 end=-1>
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
numbers = list(range(1, 11))
return str(numbers)
if __name__ == '__main__':
app.run(port=5000)
</file_edit>
USER:
OBSERVATION:
[New file /workspace/app.py is created.]
(begin of changes)
--- /workspace/app.py
+++ /workspace/app.py
@@ -1 +1,10 @@
+from flask import Flask
+app = Flask(__name__)
+@app.route('/')
+def index():
+ numbers = list(range(1, 11))
+ return str(numbers)
+
+if __name__ == '__main__':
+ app.run(port=5000)
(end of changes)
ASSISTANT:
I have created a Python file `app.py` that will display a list of numbers from 1 to 10 when you run it. Let me run the Python file for you:
<execute_bash>
python3 app.py > server.log 2>&1 &
</execute_bash>
USER:
OBSERVATION:
[1] 121
[1]+ Exit 1 python3 app.py > server.log 2>&1
ASSISTANT: Looks like the server is running with PID 121 then crashed. Let me check the server log:
<execute_bash>
cat server.log
</execute_bash>
USER:
OBSERVATION:
Traceback (most recent call last):
File "/workspace/app.py", line 2, in <module>
from flask import Flask
ModuleNotFoundError: No module named 'flask'
ASSISTANT:
It seems that Flask is not installed. Let me install Flask for you:
<execute_bash>
pip install flask
</execute_bash>
USER:
OBSERVATION:
Defaulting to user installation because normal site-packages is not writeable
Collecting flask
Using cached flask-3.0.3-py3-none-any.whl (101 kB)
Collecting blinker>=1.6.2
Using cached blinker-1.7.0-py3-none-any.whl (13 kB)
Collecting Werkzeug>=3.0.0
Using cached werkzeug-3.0.2-py3-none-any.whl (226 kB)
Collecting click>=8.1.3
Using cached click-8.1.7-py3-none-any.whl (97 kB)
Collecting itsdangerous>=2.1.2
Using cached itsdangerous-2.2.0-py3-none-any.whl (16 kB)
Requirement already satisfied: Jinja2>=3.1.2 in /home/openhands/.local/lib/python3.10/site-packages (from flask) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.0 in /home/openhands/.local/lib/python3.10/site-packages (from Jinja2>=3.1.2->flask) (2.1.5)
Installing collected packages: Werkzeug, itsdangerous, click, blinker, flask
Successfully installed Werkzeug-3.0.2 blinker-1.7.0 click-8.1.7 flask-3.0.3 itsdangerous-2.2.0
ASSISTANT:
Now that Flask is installed, let me run the Python file again:
<execute_bash>
python3 app.py > server.log 2>&1 &
</execute_bash>
USER:
OBSERVATION:
[1] 124
ASSISTANT:
Let me check the server log again:
<execute_bash>
cat server.log
</execute_bash>
USER:
OBSERVATION:
* Serving Flask app 'app'
* Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on http://127.0.0.1:5000
Press CTRL+C to quit
ASSISTANT:
The server is running on port 5000 with PID 124. You can access the list of numbers by visiting http://127.0.0.1:5000. If you have any further questions, feel free to ask!
USER: Now browse the newly started server's homepage and show me the content.
ASSISTANT:
Sure! Let me browse the server's homepage at http://127.0.0.1:5000:
<execute_browse>
Get the content on "http://127.0.0.1:5000"
</execute_browse>
USER:
OBSERVATION:
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ASSISTANT:
The content of the server's homepage is "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]". If you have any further questions, feel free to ask!
USER: Now kill the server, make it display the numbers in a table format.
ASSISTANT:
Sure! Let me stop the server first:
<execute_bash>
kill 124
</execute_bash>
USER:
OBSERVATION:
[1]+ Terminated python3 app.py > server.log 2>&1
ASSISTANT:
The server has been stopped. Let me open the Python file and modify it.
<execute_ipython>
open_file('app.py')
</execute_ipython>
USER:
[File: /workspace/app.py (10 lines total)]
(this is the beginning of the file)
1|from flask import Flask
2|app = Flask(__name__)
3|
4|@app.route('/')
5|def index():
6| numbers = list(range(1, 11))
7| return str(numbers)
8|
9|if __name__ == '__main__':
10| app.run(port=5000)
(this is the end of the file)
ASSISTANT:
I should edit the file to display the numbers in a table format. I should include correct indentation. Let me update the file:
<file_edit path="/workspace/app.py" start=4 end=7>
@app.route('/')
def index():
numbers = list(range(1, 11))
ret = '<table>' + ''.join([f'<tr><td>{i}</td></tr>' for i in numbers]) + '</table>'
return ret
</file_edit>
USER:
Observation:
[Edited existing file /workspace/app.py]
[File: /workspace/app.py (10 lines total after edit)]
(this is the beginning of the file)
1|from flask import Flask
2|app = Flask(__name__)
3|
4|@app.route('/')
5|def index():
6| numbers = list(range(1, 11))
7| ret = '<table>' + ''.join([f'<tr><td>{i}</td></tr>' for i in numbers]) + '</table>'
8| return ret
9|
10|if __name__ == '__main__':
11| app.run(port=5000)
(this is the end of the file)
[File updated (edited at line 7). Please review the changes and make sure they are correct (correct indentation, no duplicate lines, etc). Edit the file again if necessary.]
ASSISTANT:
Running the updated file:
<execute_bash>
python3 app.py > server.log 2>&1 &
</execute_bash>
USER:
Observation:
[1] 126
ASSISTANT:
The server is running on port 5000 with PID 126. You can access the list of numbers in a table format by visiting http://127.0.0.1:5000. Let me know if you have any further requests!
<finish></finish>
--- END OF EXAMPLE ---
{% endset %}
Here is an example of how you can interact with the environment for task solving:
{{ DEFAULT_EXAMPLE }}
NOW, LET'S START!

View File

@@ -4,4 +4,3 @@ You are OpenHands agent, a helpful AI assistant that can interact with a compute
* When configuring git credentials, use "openhands" as the user.name and "openhands@all-hands.dev" as the user.email by default, unless explicitly instructed otherwise.
-* The assistant MUST NOT include comments in the code unless they are necessary to describe non-obvious behavior.
</IMPORTANT>

View File

@@ -12,6 +12,7 @@ from openhands.controller.state.state import State, TrafficControlState
from openhands.controller.stuck import StuckDetector
from openhands.core.config import AgentConfig, LLMConfig
from openhands.core.exceptions import (
FunctionCallValidationError,
LLMMalformedActionError,
LLMNoActionError,
LLMResponseError,
@@ -478,7 +479,12 @@ class AgentController:
action = self.agent.step(self.state)
if action is None:
raise LLMNoActionError('No action was returned')
-except (LLMMalformedActionError, LLMNoActionError, LLMResponseError) as e:
+except (
+LLMMalformedActionError,
+LLMNoActionError,
+LLMResponseError,
+FunctionCallValidationError,
+) as e:
self.event_stream.add_event(
ErrorObservation(
content=str(e),

View File

@@ -20,7 +20,6 @@ class AgentConfig:
disabled_microagents: A list of microagents to disable. Default is None.
"""
-function_calling: bool = True
codeact_enable_browsing: bool = True
codeact_enable_llm_editor: bool = False
codeact_enable_jupyter: bool = True

View File

@@ -94,3 +94,23 @@ class CloudFlareBlockageError(Exception):
"""Exception raised when a request is blocked by CloudFlare."""
pass
class FunctionCallConversionError(Exception):
"""Exception raised when FunctionCallingConverter failed to convert a non-function call message to a function call message.
This typically happens when there's a malformed message (e.g., missing <function=...> tags). But not due to LLM output.
"""
def __init__(self, message):
super().__init__(message)
class FunctionCallValidationError(Exception):
"""Exception raised when FunctionCallingConverter failed to validate a function call message.
This typically happens when the LLM outputs unrecognized function call / parameter names / values.
"""
def __init__(self, message):
super().__init__(message)
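The two classes split responsibility: conversion errors point at malformed stored messages, while validation errors come from bad LLM output and, per the controller change above, are surfaced back to the agent as an ErrorObservation. A hedged usage sketch (`validate_tool_name` is a hypothetical helper, not part of this commit):

```
from openhands.core.exceptions import FunctionCallValidationError

def validate_tool_name(name: str, known_tools: set[str]) -> None:
    # Hypothetical helper: reject tool names the agent never registered.
    if name not in known_tools:
        raise FunctionCallValidationError(f'Unrecognized function: {name}')

try:
    validate_tool_name('execute_java', {'execute_bash', 'execute_ipython'})
except FunctionCallValidationError as e:
    print(e)  # recoverable: fed back to the LLM so it can retry
```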

View File

@@ -72,7 +72,12 @@ class Message(BaseModel):
# - into a single string: for providers that don't support list of content items (e.g. no vision, no tool calls)
# - into a list of content items: the new APIs of providers with vision/prompt caching/tool calls
# NOTE: remove this when litellm or providers support the new API
-if self.cache_enabled or self.vision_enabled or self.tool_call_id is not None:
+if (
+self.cache_enabled
+or self.vision_enabled
+or self.tool_call_id is not None
+or self.tool_calls is not None
+)
return self._list_serializer()
return self._string_serializer()

View File

@@ -0,0 +1,794 @@
"""Convert function calling messages to non-function calling messages and vice versa.
This will inject prompts so that models that don't support function calling
can still be used with function calling agents.
We follow the format from: https://docs.litellm.ai/docs/completion/function_call
"""
import copy
import json
import re
from typing import Iterable
from litellm import ChatCompletionToolParam
from openhands.core.exceptions import (
FunctionCallConversionError,
FunctionCallValidationError,
)
# Inspired by: https://docs.together.ai/docs/llama-3-function-calling#function-calling-w-llama-31-70b
SYSTEM_PROMPT_SUFFIX_TEMPLATE = """
You have access to the following functions:
{description}
If you choose to call a function ONLY reply in the following format with NO suffix:
<function=example_function_name>
<parameter=example_parameter_1>value_1</parameter>
<parameter=example_parameter_2>
This is the value for the second parameter
that can span
multiple lines
</parameter>
</function>
<IMPORTANT>
Reminder:
- Function calls MUST follow the specified format, start with <function= and end with </function>
- Required parameters MUST be specified
- Only call one function at a time
- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after.
- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls
"""
STOP_WORDS = ['</function']
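# Passed as `stop` to the completion call: generation halts at the first
# closing tag, so a model can emit at most one function call per message.
# The truncated closing tag is repaired by `_fix_stopword` below.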
# NOTE: we need to make sure this example is always in-sync with the tool interface designed in openhands/agenthub/codeact_agent/function_calling.py
IN_CONTEXT_LEARNING_EXAMPLE_PREFIX = """
Here's a running example of how to perform a task with the provided tools.
--------------------- START OF EXAMPLE ---------------------
USER: Create a list of numbers from 1 to 10, and display them in a web page at port 5000.
ASSISTANT: Sure! Let me first check the current directory:
<function=execute_bash>
<parameter=command>
pwd && ls
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
/workspace
openhands@runtime:~/workspace$
ASSISTANT: There is no `app.py` file in the current directory. Let me create a Python file `app.py`:
<function=str_replace_editor>
<parameter=command>create</parameter>
<parameter=path>/workspace/app.py</parameter>
<parameter=file_text>
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
numbers = list(range(1, 11))
return str(numbers)
if __name__ == '__main__':
app.run(port=5000)
</parameter>
</function>
USER: EXECUTION RESULT of [str_replace_editor]:
File created successfully at: /workspace/app.py
ASSISTANT: I have created a Python file `app.py` that will display a list of numbers from 1 to 10 when you run it. Let me run the Python file for you:
<function=execute_bash>
<parameter=command>
python3 app.py > server.log 2>&1 &
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
[1] 121
[1]+ Exit 1 python3 app.py > server.log 2>&1
ASSISTANT: Looks like the server started with PID 121 and then crashed. Let me check the server log:
<function=execute_bash>
<parameter=command>
cat server.log
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
Traceback (most recent call last):
File "/workspace/app.py", line 2, in <module>
from flask import Flask
ModuleNotFoundError: No module named 'flask'
ASSISTANT: Looks like the server crashed because the `flask` module is not installed. Let me install the `flask` module for you:
<function=execute_bash>
<parameter=command>
pip3 install flask
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
Defaulting to user installation because normal site-packages is not writeable
Collecting flask
Using cached flask-3.0.3-py3-none-any.whl (101 kB)
Collecting blinker>=1.6.2
Using cached blinker-1.7.0-py3-none-any.whl (13 kB)
Collecting Werkzeug>=3.0.0
Using cached werkzeug-3.0.2-py3-none-any.whl (226 kB)
Collecting click>=8.1.3
Using cached click-8.1.7-py3-none-any.whl (97 kB)
Collecting itsdangerous>=2.1.2
Using cached itsdangerous-2.2.0-py3-none-any.whl (16 kB)
Requirement already satisfied: Jinja2>=3.1.2 in /home/openhands/.local/lib/python3.10/site-packages (from flask) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.0 in /home/openhands/.local/lib/python3.10/site-packages (from Jinja2>=3.1.2->flask) (2.1.5)
Installing collected packages: Werkzeug, itsdangerous, click, blinker, flask
Successfully installed Werkzeug-3.0.2 blinker-1.7.0 click-8.1.7 flask-3.0.3 itsdangerous-2.2.0
ASSISTANT: The `flask` module is installed successfully. Let me run the Python file again:
<function=execute_bash>
<parameter=command>
python3 app.py > server.log 2>&1 &
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
[1] 124
ASSISTANT:
Let me check the server log again:
<function=execute_bash>
<parameter=command>
cat server.log
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
* Serving Flask app 'app'
* Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on http://127.0.0.1:5000
Press CTRL+C to quit
ASSISTANT:
The server is running on port 5000 with PID 124. You can access the list of numbers by visiting http://127.0.0.1:5000. If you have any further questions, feel free to ask!
USER: Now kill the server, make it display the numbers in a table format.
ASSISTANT:
Sure! Let me stop the server first:
<function=execute_bash>
<parameter=command>
kill 124
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
[1]+ Terminated python3 app.py > server.log 2>&1
ASSISTANT:
Now let me display the numbers in a table format:
<function=str_replace_editor>
<parameter=command>str_replace</parameter>
<parameter=path>/workspace/app.py</parameter>
<parameter=old_str>return str(numbers)</parameter>
<parameter=new_str>return '<table>' + ''.join([f'<tr><td>{i}</td></tr>' for i in numbers]) + '</table>'</parameter>
</function>
USER: EXECUTION RESULT of [str_replace_editor]:
The file /workspace/app.py has been edited. Here's the result of running `cat -n` on a snippet of /workspace/app.py:
3
4 @app.route('/')
5 def index():
6 numbers = list(range(1, 11))
7 return '<table>' + ''.join([f'<tr><td>{i}</td></tr>' for i in numbers]) + '</table>'
8
9 if __name__ == '__main__':
10 app.run(port=5000)
Review the changes and make sure they are as expected. Edit the file again if necessary.
ASSISTANT:
Running the updated file:
<function=execute_bash>
<parameter=command>
python3 app.py > server.log 2>&1 &
</parameter>
</function>
USER: EXECUTION RESULT of [execute_bash]:
[1] 126
ASSISTANT:
The server is running on port 5000 with PID 126. You can access the list of numbers in a table format by visiting http://127.0.0.1:5000. Let me know if you have any further requests!
<function=finish>
</function>
--------------------- END OF EXAMPLE ---------------------
Do NOT assume the environment is the same as in the example above.
--------------------- NEW TASK DESCRIPTION ---------------------
""".lstrip()
IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX = """
--------------------- END OF NEW TASK DESCRIPTION ---------------------
PLEASE follow the format strictly! PLEASE EMIT ONE AND ONLY ONE FUNCTION CALL PER MESSAGE.
""".lstrip()
# Regex patterns for function call parsing
FN_REGEX_PATTERN = r'<function=([^>]+)>\n(.*?)</function>'
FN_PARAM_REGEX_PATTERN = r'<parameter=([^>]+)>(.*?)</parameter>'
# Add new regex pattern for tool execution results
TOOL_RESULT_REGEX_PATTERN = r'EXECUTION RESULT of \[(.*?)\]:\n(.*)'
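# For illustration, FN_REGEX_PATTERN matches blocks such as:
#
#     <function=execute_bash>
#     <parameter=command>ls -la</parameter>
#     </function>
#
# and TOOL_RESULT_REGEX_PATTERN matches observations such as:
#
#     EXECUTION RESULT of [execute_bash]:
#     total 0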
def convert_tool_call_to_string(tool_call: dict) -> str:
"""Convert tool call to content in string format."""
if 'function' not in tool_call:
raise FunctionCallConversionError("Tool call must contain 'function' key.")
if 'id' not in tool_call:
raise FunctionCallConversionError("Tool call must contain 'id' key.")
if 'type' not in tool_call:
raise FunctionCallConversionError("Tool call must contain 'type' key.")
if tool_call['type'] != 'function':
raise FunctionCallConversionError("Tool call type must be 'function'.")
ret = f"<function={tool_call['function']['name']}>\n"
try:
args = json.loads(tool_call['function']['arguments'])
except json.JSONDecodeError as e:
raise FunctionCallConversionError(
f"Failed to parse arguments as JSON. Arguments: {tool_call['function']['arguments']}"
) from e
for param_name, param_value in args.items():
is_multiline = isinstance(param_value, str) and '\n' in param_value
ret += f'<parameter={param_name}>'
if is_multiline:
ret += '\n'
ret += f'{param_value}'
if is_multiline:
ret += '\n'
ret += '</parameter>\n'
ret += '</function>'
return ret
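# Example output (a sketch; the input dict mirrors the litellm tool-call format):
#
#     convert_tool_call_to_string({
#         'id': 'toolu_01',
#         'type': 'function',
#         'function': {'name': 'execute_bash', 'arguments': '{"command": "pwd"}'},
#     })
#     # -> '<function=execute_bash>\n<parameter=command>pwd</parameter>\n</function>'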
def convert_tools_to_description(tools: list[dict]) -> str:
ret = ''
for i, tool in enumerate(tools):
assert tool['type'] == 'function'
fn = tool['function']
if i > 0:
ret += '\n'
ret += f"---- BEGIN FUNCTION #{i+1}: {fn['name']} ----\n"
ret += f"Description: {fn['description']}\n"
if 'parameters' in fn:
ret += 'Parameters:\n'
properties = fn['parameters'].get('properties', {})
required_params = set(fn['parameters'].get('required', []))
for j, (param_name, param_info) in enumerate(properties.items()):
# Indicate required/optional in parentheses with type
is_required = param_name in required_params
param_status = 'required' if is_required else 'optional'
param_type = param_info.get('type', 'string')
# Get parameter description
desc = param_info.get('description', 'No description provided')
# Handle enum values if present
if 'enum' in param_info:
enum_values = ', '.join(f'`{v}`' for v in param_info['enum'])
desc += f'\nAllowed values: [{enum_values}]'
ret += (
f' ({j+1}) {param_name} ({param_type}, {param_status}): {desc}\n'
)
else:
ret += 'No parameters are required for this function.\n'
ret += f'---- END FUNCTION #{i+1} ----\n'
return ret
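# For a single parameter-less tool, e.g.
# {'type': 'function', 'function': {'name': 'finish', 'description': 'Finish the interaction.'}},
# this produces (sketch):
#
#     ---- BEGIN FUNCTION #1: finish ----
#     Description: Finish the interaction.
#     No parameters are required for this function.
#     ---- END FUNCTION #1 ----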
def convert_fncall_messages_to_non_fncall_messages(
messages: list[dict],
tools: list[ChatCompletionToolParam],
) -> list[dict]:
"""Convert function calling messages to non-function calling messages."""
messages = copy.deepcopy(messages)
formatted_tools = convert_tools_to_description(tools)
system_prompt_suffix = SYSTEM_PROMPT_SUFFIX_TEMPLATE.format(
description=formatted_tools
)
converted_messages = []
first_user_message_encountered = False
for message in messages:
role, content = message['role'], message['content']
if content is None:
content = ''
# 1. SYSTEM MESSAGES
# append system prompt suffix to content
if role == 'system':
if isinstance(content, str):
content += system_prompt_suffix
elif isinstance(content, list):
if content and content[-1]['type'] == 'text':
content[-1]['text'] += system_prompt_suffix
else:
content.append({'type': 'text', 'text': system_prompt_suffix})
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
converted_messages.append({'role': 'system', 'content': content})
# 2. USER MESSAGES (no change)
elif role == 'user':
# Add in-context learning example for the first user message
if not first_user_message_encountered:
first_user_message_encountered = True
# Check tools
if not (
tools
and len(tools) > 0
and any(
(
tool['type'] == 'function'
and tool['function']['name'] == 'execute_bash'
and 'command'
in tool['function']['parameters']['properties']
)
for tool in tools
)
and any(
(
tool['type'] == 'function'
and tool['function']['name'] == 'str_replace_editor'
and 'path' in tool['function']['parameters']['properties']
and 'file_text'
in tool['function']['parameters']['properties']
and 'old_str'
in tool['function']['parameters']['properties']
and 'new_str'
in tool['function']['parameters']['properties']
)
for tool in tools
)
):
raise FunctionCallConversionError(
'The currently provided tool set is NOT compatible with the in-context learning example for FnCall to Non-FnCall conversion. '
'Please update your tool set OR the in-context learning example in openhands/llm/fn_call_converter.py'
)
# add in-context learning example
if isinstance(content, str):
content = (
IN_CONTEXT_LEARNING_EXAMPLE_PREFIX
+ content
+ IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX
)
elif isinstance(content, list):
if content and content[0]['type'] == 'text':
content[0]['text'] = (
IN_CONTEXT_LEARNING_EXAMPLE_PREFIX
+ content[0]['text']
+ IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX
)
else:
content = (
[
{
'type': 'text',
'text': IN_CONTEXT_LEARNING_EXAMPLE_PREFIX,
}
]
+ content
+ [
{
'type': 'text',
'text': IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX,
}
]
)
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
converted_messages.append(
{
'role': 'user',
'content': content,
}
)
# 3. ASSISTANT MESSAGES
# - 3.1 no change if no function call
# - 3.2 change if function call
elif role == 'assistant':
if 'tool_calls' in message and message['tool_calls'] is not None:
if len(message['tool_calls']) != 1:
raise FunctionCallConversionError(
f'Expected exactly one tool call in the message. More than one tool call is not supported. But got {len(message["tool_calls"])} tool calls. Content: {content}'
)
try:
tool_content = convert_tool_call_to_string(message['tool_calls'][0])
except FunctionCallConversionError as e:
raise FunctionCallConversionError(
f'Failed to convert tool call to string. Raw messages: {json.dumps(messages, indent=2)}'
) from e
if isinstance(content, str):
content += '\n\n' + tool_content
content = content.lstrip()
elif isinstance(content, list):
if content and content[-1]['type'] == 'text':
content[-1]['text'] += '\n\n' + tool_content
content[-1]['text'] = content[-1]['text'].lstrip()
else:
content.append({'type': 'text', 'text': tool_content})
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
converted_messages.append({'role': 'assistant', 'content': content})
# 4. TOOL MESSAGES (tool outputs)
elif role == 'tool':
# Convert tool result as assistant message
prefix = f'EXECUTION RESULT of [{message["name"]}]:\n'
# and omit "tool_call_id" AND "name"
if isinstance(content, str):
content = prefix + content
elif isinstance(content, list):
if content and content[-1]['type'] == 'text':
content[-1]['text'] = prefix + content[-1]['text']
else:
content = [{'type': 'text', 'text': prefix}] + content
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
converted_messages.append({'role': 'user', 'content': content})
else:
raise FunctionCallConversionError(
f'Unexpected role {role}. Expected system, user, assistant or tool.'
)
return converted_messages
def _extract_and_validate_params(
matching_tool: dict, param_matches: Iterable[re.Match], fn_name: str
) -> dict:
params = {}
# Parse and validate parameters
required_params = set()
if 'parameters' in matching_tool and 'required' in matching_tool['parameters']:
required_params = set(matching_tool['parameters'].get('required', []))
allowed_params = set()
if 'parameters' in matching_tool and 'properties' in matching_tool['parameters']:
allowed_params = set(matching_tool['parameters']['properties'].keys())
param_name_to_type = {}
if 'parameters' in matching_tool and 'properties' in matching_tool['parameters']:
param_name_to_type = {
name: val.get('type', 'string')
for name, val in matching_tool['parameters']['properties'].items()
}
# Collect parameters
found_params = set()
for param_match in param_matches:
param_name = param_match.group(1)
param_value = param_match.group(2).strip()
# Validate parameter is allowed
if allowed_params and param_name not in allowed_params:
raise FunctionCallValidationError(
f"Parameter '{param_name}' is not allowed for function '{fn_name}'. "
f'Allowed parameters: {allowed_params}'
)
# Validate and convert parameter type
# supported: string, integer, array
if param_name in param_name_to_type:
if param_name_to_type[param_name] == 'integer':
try:
param_value = int(param_value)
except ValueError:
raise FunctionCallValidationError(
f"Parameter '{param_name}' is expected to be an integer."
)
elif param_name_to_type[param_name] == 'array':
try:
param_value = json.loads(param_value)
except json.JSONDecodeError:
raise FunctionCallValidationError(
f"Parameter '{param_name}' is expected to be an array."
)
else:
# string
pass
# Enum check
if 'enum' in matching_tool['parameters']['properties'][param_name]:
if (
param_value
not in matching_tool['parameters']['properties'][param_name]['enum']
):
raise FunctionCallValidationError(
f"Parameter '{param_name}' is expected to be one of {matching_tool['parameters']['properties'][param_name]['enum']}."
)
params[param_name] = param_value
found_params.add(param_name)
# Check all required parameters are present
missing_params = required_params - found_params
if missing_params:
raise FunctionCallValidationError(
f"Missing required parameters for function '{fn_name}': {missing_params}"
)
return params
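# Illustrative behavior (a sketch, not exhaustive): a parameter declared with
# {'type': 'integer'} has its matched value converted via int(), a declared
# 'array' is parsed with json.loads, and anything else is kept as the raw
# string; unknown parameters and missing required parameters raise
# FunctionCallValidationError.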
def _fix_stopword(content: str) -> str:
"""Fix the issue when some LLM would NOT return the stopword."""
if '<function=' in content and content.count('<function=') == 1:
if content.endswith('</'):
content = content.rstrip() + 'function>'
else:
content = content + '\n</function>'
return content
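# Illustrative repairs (assuming exactly one '<function=' in the content):
#     _fix_stopword('<function=finish>\n</')  -> '<function=finish>\n</function>'
#     _fix_stopword('<function=finish>\n')    -> '<function=finish>\n\n</function>'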
def convert_non_fncall_messages_to_fncall_messages(
messages: list[dict],
tools: list[ChatCompletionToolParam],
) -> list[dict]:
"""Convert non-function calling messages back to function calling messages."""
messages = copy.deepcopy(messages)
formatted_tools = convert_tools_to_description(tools)
system_prompt_suffix = SYSTEM_PROMPT_SUFFIX_TEMPLATE.format(
description=formatted_tools
)
converted_messages = []
tool_call_counter = 1 # Counter for tool calls
first_user_message_encountered = False
for message in messages:
role, content = message['role'], message['content']
content = content or '' # handle cases where content is None
# For system messages, remove the added suffix
if role == 'system':
if isinstance(content, str):
# Remove the suffix if present
content = content.split(system_prompt_suffix)[0]
elif isinstance(content, list):
if content and content[-1]['type'] == 'text':
# Remove the suffix from the last text item
content[-1]['text'] = content[-1]['text'].split(
system_prompt_suffix
)[0]
converted_messages.append({'role': 'system', 'content': content})
# User messages: strip the in-context learning example and convert embedded tool execution results back into tool messages
elif role == 'user':
# Check & replace in-context learning example
if not first_user_message_encountered:
first_user_message_encountered = True
if isinstance(content, str):
content = content.replace(IN_CONTEXT_LEARNING_EXAMPLE_PREFIX, '')
content = content.replace(IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX, '')
elif isinstance(content, list):
for item in content:
if item['type'] == 'text':
item['text'] = item['text'].replace(
IN_CONTEXT_LEARNING_EXAMPLE_PREFIX, ''
)
item['text'] = item['text'].replace(
IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX, ''
)
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
# Check for tool execution result pattern
if isinstance(content, str):
tool_result_match = re.search(
TOOL_RESULT_REGEX_PATTERN, content, re.DOTALL
)
elif isinstance(content, list):
tool_result_match = next(
(
_match
for item in content
if item.get('type') == 'text'
and (
_match := re.search(
TOOL_RESULT_REGEX_PATTERN, item['text'], re.DOTALL
)
)
),
None,
)
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
if tool_result_match:
if not (
isinstance(content, str)
or (
isinstance(content, list)
and len(content) == 1
and content[0].get('type') == 'text'
)
):
raise FunctionCallConversionError(
f'Expected str or list with one text item when tool result is present in the message. Content: {content}'
)
tool_name = tool_result_match.group(1)
tool_result = tool_result_match.group(2).strip()
# Convert to tool message format
converted_messages.append(
{
'role': 'tool',
'name': tool_name,
'content': [{'type': 'text', 'text': tool_result}]
if isinstance(content, list)
else tool_result,
'tool_call_id': f'toolu_{tool_call_counter-1:02d}', # Use last generated ID
}
)
else:
converted_messages.append({'role': 'user', 'content': content})
# Handle assistant messages
elif role == 'assistant':
if isinstance(content, str):
content = _fix_stopword(content)
fn_match = re.search(FN_REGEX_PATTERN, content, re.DOTALL)
elif isinstance(content, list):
if content and content[-1]['type'] == 'text':
content[-1]['text'] = _fix_stopword(content[-1]['text'])
fn_match = re.search(
FN_REGEX_PATTERN, content[-1]['text'], re.DOTALL
)
else:
fn_match = None
fn_match_exists = any(
item.get('type') == 'text'
and re.search(FN_REGEX_PATTERN, item['text'], re.DOTALL)
for item in content
)
if fn_match_exists and not fn_match:
raise FunctionCallConversionError(
f'Expecting function call in the LAST index of content list. But got content={content}'
)
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
if fn_match:
fn_name = fn_match.group(1)
fn_body = fn_match.group(2)
matching_tool = next(
(
tool['function']
for tool in tools
if tool['type'] == 'function'
and tool['function']['name'] == fn_name
),
None,
)
# Validate function exists in tools
if not matching_tool:
raise FunctionCallValidationError(
f"Function '{fn_name}' not found in available tools: {[tool['function']['name'] for tool in tools if tool['type'] == 'function']}"
)
# Parse parameters
param_matches = re.finditer(FN_PARAM_REGEX_PATTERN, fn_body, re.DOTALL)
params = _extract_and_validate_params(
matching_tool, param_matches, fn_name
)
# Create tool call with unique ID
tool_call_id = f'toolu_{tool_call_counter:02d}'
tool_call = {
'index': 1, # always 1 because we only support **one tool call per message**
'id': tool_call_id,
'type': 'function',
'function': {'name': fn_name, 'arguments': json.dumps(params)},
}
tool_call_counter += 1 # Increment counter
# Remove the function call part from content
if isinstance(content, list):
assert content and content[-1]['type'] == 'text'
content[-1]['text'] = (
content[-1]['text'].split('<function=')[0].strip()
)
elif isinstance(content, str):
content = content.split('<function=')[0].strip()
else:
raise FunctionCallConversionError(
f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
)
converted_messages.append(
{'role': 'assistant', 'content': content, 'tool_calls': [tool_call]}
)
else:
# No function call, keep message as is
converted_messages.append(message)
else:
raise FunctionCallConversionError(
f'Unexpected role {role}. Expected system, user, or assistant in non-function calling messages.'
)
return converted_messages
def convert_from_multiple_tool_calls_to_single_tool_call_messages(
messages: list[dict],
) -> list[dict]:
"""Break one message with multiple tool calls into multiple messages."""
converted_messages = []
pending_tool_calls: dict[str, dict] = {}
for message in messages:
role, content = message['role'], message['content']
if role == 'assistant':
if message.get('tool_calls') and len(message['tool_calls']) > 1:
# handle multiple tool calls by breaking them into multiple messages
for i, tool_call in enumerate(message['tool_calls']):
pending_tool_calls[tool_call['id']] = {
'role': 'assistant',
'content': content if i == 0 else '',
'tool_calls': [tool_call],
}
else:
converted_messages.append(message)
elif role == 'tool':
if message['tool_call_id'] in pending_tool_calls:
# remove the tool call from the pending list
_tool_call_message = pending_tool_calls.pop(message['tool_call_id'])
converted_messages.append(_tool_call_message)
# add the tool result
converted_messages.append(message)
else:
assert (
len(pending_tool_calls) == 0
), f'Expected no pending tool calls when a tool message references an unknown tool_call_id: {pending_tool_calls=}'
converted_messages.append(message)
else:
assert (
len(pending_tool_calls) == 0
), f'Expected no pending tool calls when handling a message with role {role}: {pending_tool_calls=}, {message=}'
converted_messages.append(message)
if len(pending_tool_calls) > 0:
raise FunctionCallConversionError(
f'Found pending tool calls but no tool result: {pending_tool_calls=}'
)
return converted_messages
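# Sketch of the transformation performed above (message format follows litellm):
#     assistant(content, tool_calls=[A, B]), tool(A), tool(B)
# becomes
#     assistant(content, tool_calls=[A]), tool(A),
#     assistant('', tool_calls=[B]), tool(B)
# i.e. the original text content is kept only on the first split message.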

View File

@@ -12,6 +12,7 @@ from openhands.core.config import LLMConfig
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import litellm
from litellm import Message as LiteLLMMessage
from litellm import ModelInfo, PromptTokensDetails
from litellm import completion as litellm_completion
from litellm import completion_cost as litellm_completion_cost
@@ -28,6 +29,11 @@ from openhands.core.exceptions import CloudFlareBlockageError
from openhands.core.logger import openhands_logger as logger
from openhands.core.message import Message
from openhands.llm.debug_mixin import DebugMixin
from openhands.llm.fn_call_converter import (
STOP_WORDS,
convert_fncall_messages_to_non_fncall_messages,
convert_non_fncall_messages_to_fncall_messages,
)
from openhands.llm.metrics import Metrics
from openhands.llm.retry_mixin import RetryMixin
@@ -56,11 +62,12 @@ CACHE_PROMPT_SUPPORTED_MODELS = [
# function calling supporting models
FUNCTION_CALLING_SUPPORTED_MODELS = [
'claude-3-5-sonnet',
'claude-3-5-sonnet-20240620',
'claude-3-5-sonnet-20241022',
'claude-3-5-haiku-20241022',
'gpt-4o',
'gpt-4o-mini',
'gpt-4o',
]
@@ -136,6 +143,9 @@ class LLM(RetryMixin, DebugMixin):
drop_params=self.config.drop_params,
)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.init_model_info()
if self.vision_is_active():
logger.debug('LLM: model has vision enabled')
if self.is_caching_prompt_active():
@@ -143,7 +153,7 @@ class LLM(RetryMixin, DebugMixin):
if self.is_function_calling_active():
logger.debug('LLM: model supports function calling')
completion_unwrapped = self._completion
self._completion_unwrapped = self._completion
@self.retry_decorator(
num_retries=self.config.num_retries,
@@ -154,8 +164,11 @@ class LLM(RetryMixin, DebugMixin):
)
def wrapper(*args, **kwargs):
"""Wrapper for the litellm completion function. Logs the input and output of the completion function."""
self.init_model_info()
from openhands.core.utils import json
messages: list[dict[str, Any]] | dict[str, Any] = []
mock_function_calling = kwargs.pop('mock_function_calling', False)
# some callers might send the model and messages directly
# litellm allows positional args, like completion(model, messages, **kwargs)
@@ -174,6 +187,18 @@ class LLM(RetryMixin, DebugMixin):
# ensure we work with a list of messages
messages = messages if isinstance(messages, list) else [messages]
original_fncall_messages = copy.deepcopy(messages)
mock_fncall_tools = None
if mock_function_calling:
assert (
'tools' in kwargs
), "'tools' must be in kwargs when mock_function_calling is True"
messages = convert_fncall_messages_to_non_fncall_messages(
messages, kwargs['tools']
)
kwargs['messages'] = messages
kwargs['stop'] = STOP_WORDS
mock_fncall_tools = kwargs.pop('tools')
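# With mock function calling, the provider never receives `tools`: the
# schemas are folded into the prompt above, generation stops at STOP_WORDS,
# and the raw text response is parsed back into a tool call below.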
# if we have no messages, something went very wrong
if not messages:
@@ -193,7 +218,25 @@ class LLM(RetryMixin, DebugMixin):
try:
# we don't support streaming here, thus we get a ModelResponse
resp: ModelResponse = completion_unwrapped(*args, **kwargs)
resp: ModelResponse = self._completion_unwrapped(*args, **kwargs)
non_fncall_response = copy.deepcopy(resp)
if mock_function_calling:
assert len(resp.choices) == 1
assert mock_fncall_tools is not None
non_fncall_response_message = resp.choices[0].message
fn_call_messages_with_response = (
convert_non_fncall_messages_to_fncall_messages(
messages + [non_fncall_response_message], mock_fncall_tools
)
)
fn_call_response_message = fn_call_messages_with_response[-1]
if not isinstance(fn_call_response_message, LiteLLMMessage):
fn_call_response_message = LiteLLMMessage(
**fn_call_response_message
)
resp.choices[0].message = fn_call_response_message
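# From here on, the caller sees a normal function-calling response even
# though the underlying model only produced plain text.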
# log for evals or other scripts that need the raw completion
if self.config.log_completions:
assert self.config.log_completions_folder is not None
@@ -202,25 +245,23 @@ class LLM(RetryMixin, DebugMixin):
# use the metric model name (for draft editor)
f'{self.metrics.model_name.replace("/", "__")}-{time.time()}.json',
)
from openhands.core.utils import json
_d = {
'messages': messages,
'response': resp,
'args': args,
'kwargs': {k: v for k, v in kwargs.items() if k != 'messages'},
'timestamp': time.time(),
'cost': self._completion_cost(resp),
}
if mock_function_calling:
# Overwrite response as non-fncall to be consistent with `messages`
_d['response'] = non_fncall_response
# Save fncall_messages/response separately
_d['fncall_messages'] = original_fncall_messages
_d['fncall_response'] = resp
with open(log_file, 'w') as f:
f.write(
json.dumps(
{
'messages': messages,
'response': resp,
'args': args,
'kwargs': {
k: v
for k, v in kwargs.items()
if k != 'messages'
},
'timestamp': time.time(),
'cost': self._completion_cost(resp),
},
)
)
f.write(json.dumps(_d))
message_back: str = resp['choices'][0]['message']['content']
@@ -330,7 +371,9 @@ class LLM(RetryMixin, DebugMixin):
self.config.max_output_tokens = self.model_info['max_tokens']
def vision_is_active(self):
return not self.config.disable_vision and self._supports_vision()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return not self.config.disable_vision and self._supports_vision()
def _supports_vision(self):
"""Acquire from litellm if model is vision capable.
@@ -358,15 +401,13 @@ class LLM(RetryMixin, DebugMixin):
Returns:
boolean: True if prompt caching is supported and enabled for the given model.
"""
return self.config.caching_prompt is True and (
(
return (
self.config.caching_prompt is True
and (
self.config.model in CACHE_PROMPT_SUPPORTED_MODELS
or self.config.model.split('/')[-1] in CACHE_PROMPT_SUPPORTED_MODELS
)
or (
self.model_info is not None
and self.model_info.get('supports_prompt_caching', False)
)
# We don't need to look up model_info, because only Anthropic models need the explicit caching breakpoint
)
def is_function_calling_active(self) -> bool:
@@ -376,10 +417,7 @@ class LLM(RetryMixin, DebugMixin):
or self.config.model.split('/')[-1] in FUNCTION_CALLING_SUPPORTED_MODELS
or any(m in self.config.model for m in FUNCTION_CALLING_SUPPORTED_MODELS)
)
return model_name_supported or (
self.model_info is not None
and self.model_info.get('supports_function_calling', False)
)
return model_name_supported
def _post_completion(self, response: ModelResponse) -> None:
"""Post-process the completion response.

View File

@@ -302,12 +302,6 @@ class RemoteRuntime(Runtime):
else:
return None
@tenacity.retry(
stop=tenacity.stop_after_delay(180) | stop_if_should_exit(),
reraise=True,
retry=tenacity.retry_if_exception_type(RuntimeNotReadyError),
wait=tenacity.wait_fixed(2),
)
def _wait_until_alive(self):
retry_decorator = tenacity.retry(
stop=tenacity.stop_after_delay(

View File

@@ -18,7 +18,6 @@ class PromptManager:
Attributes:
prompt_dir (str): Directory containing prompt templates.
agent_skills_docs (str): Documentation of agent skills.
microagent_dir (str): Directory containing microagent specifications.
disabled_microagents (list[str] | None): List of microagents to disable. If None, all microagents are enabled.
"""
@@ -27,11 +26,9 @@ class PromptManager:
self,
prompt_dir: str,
microagent_dir: str | None = None,
agent_skills_docs: str = '',
disabled_microagents: list[str] | None = None,
):
self.prompt_dir: str = prompt_dir
self.agent_skills_docs: str = agent_skills_docs
self.system_template: Template = self._load_template('system_prompt')
self.user_template: Template = self._load_template('user_prompt')
@@ -62,10 +59,7 @@ class PromptManager:
return Template(file.read())
def get_system_message(self) -> str:
rendered = self.system_template.render(
agent_skills_docs=self.agent_skills_docs,
).strip()
return rendered
return self.system_template.render().strip()
def get_example_user_message(self) -> str:
"""This is the initial user message provided to the agent

poetry.lock generated
View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "aenum"
@@ -10211,4 +10211,4 @@ testing = ["coverage[toml]", "zope.event", "zope.testing"]
[metadata]
lock-version = "2.0"
python-versions = "^3.12"
content-hash = "a552f630dfdb9221eda6932e71e67a935c52ebfe4388ec9ef4b3245e7df2f82b"
content-hash = "8718ffe2ed836fca6c646c37bdad2c9c8e63ebd7ec881f420148fef5095d19e4"

View File

@@ -14,7 +14,7 @@ packages = [
python = "^3.12"
datasets = "*"
pandas = "*"
litellm = "^1.51.1"
litellm = "^1.52.3"
google-generativeai = "*" # To use litellm with Gemini Pro API
google-api-python-client = "*" # For Google Sheets API
google-auth-httplib2 = "*" # For Google Sheets authentication
@@ -95,6 +95,7 @@ reportlab = "*"
[tool.coverage.run]
concurrency = ["gevent"]
[tool.poetry.group.runtime.dependencies]
jupyterlab = "*"
notebook = "*"
@@ -125,6 +126,7 @@ ignore = ["D1"]
[tool.ruff.lint.pydocstyle]
convention = "google"
[tool.poetry.group.evaluation.dependencies]
streamlit = "*"
whatthepatch = "*"

View File

@@ -37,7 +37,6 @@ def test_cmd_output_observation_message(agent: CodeActAgent):
assert result.role == 'user'
assert len(result.content) == 1
assert isinstance(result.content[0], TextContent)
assert 'OBSERVATION:' in result.content[0].text
assert 'Command output' in result.content[0].text
assert 'Command finished with exit code 0' in result.content[0].text
@@ -57,7 +56,6 @@ def test_ipython_run_cell_observation_message(agent: CodeActAgent):
assert result.role == 'user'
assert len(result.content) == 1
assert isinstance(result.content[0], TextContent)
assert 'OBSERVATION:' in result.content[0].text
assert 'IPython output' in result.content[0].text
assert (
'![image](data:image/png;base64, ...) already displayed to user'
@@ -80,7 +78,6 @@ def test_agent_delegate_observation_message(agent: CodeActAgent):
assert result.role == 'user'
assert len(result.content) == 1
assert isinstance(result.content[0], TextContent)
assert 'OBSERVATION:' in result.content[0].text
assert 'Delegated agent output' in result.content[0].text
@@ -96,7 +93,6 @@ def test_error_observation_message(agent: CodeActAgent):
assert result.role == 'user'
assert len(result.content) == 1
assert isinstance(result.content[0], TextContent)
assert 'OBSERVATION:' in result.content[0].text
assert 'Error message' in result.content[0].text
assert 'Error occurred in processing last action' in result.content[0].text

View File

@@ -1,53 +0,0 @@
import pytest
from openhands.agenthub.codeact_agent.action_parser import (
CodeActActionParserAgentDelegate,
)
from openhands.events.action import AgentDelegateAction
@pytest.mark.parametrize(
'action_str, expected_agent, expected_thought, expected_task',
[
(
'I need to search for information.\n<execute_browse>Tell me who is the Vice President of the USA</execute_browse>',
'BrowsingAgent',
'I need to search for information.\nI should start with: Tell me who is the Vice President of the USA',
'Tell me who is the Vice President of the USA',
),
(
'<execute_browse>Search for recent climate change data</execute_browse>',
'BrowsingAgent',
'I should start with: Search for recent climate change data',
'Search for recent climate change data',
),
(
"Let's use the browsing agent to find this information.\n<execute_browse>Find the population of Tokyo in 2023</execute_browse>\nThis will help us answer the question.",
'BrowsingAgent',
"Let's use the browsing agent to find this information.\n\nThis will help us answer the question.\nI should start with: Find the population of Tokyo in 2023",
'Find the population of Tokyo in 2023',
),
],
)
def test_codeact_action_parser_agent_delegate(
action_str, expected_agent, expected_thought, expected_task
):
parser = CodeActActionParserAgentDelegate()
assert parser.check_condition(action_str)
action = parser.parse(action_str)
assert isinstance(action, AgentDelegateAction)
assert action.agent == expected_agent
assert action.thought == expected_thought
assert action.inputs['task'] == expected_task
def test_codeact_action_parser_agent_delegate_no_match():
parser = CodeActActionParserAgentDelegate()
action_str = 'This is a regular message without any browse command.'
assert not parser.check_condition(action_str)
with pytest.raises(AssertionError):
parser.parse(action_str)

View File

@@ -0,0 +1,686 @@
"""Test for FunctionCallingConverter."""
import copy
import json
import pytest
from litellm import ChatCompletionToolParam
from openhands.llm.fn_call_converter import (
IN_CONTEXT_LEARNING_EXAMPLE_PREFIX,
IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX,
FunctionCallConversionError,
convert_fncall_messages_to_non_fncall_messages,
convert_from_multiple_tool_calls_to_single_tool_call_messages,
convert_non_fncall_messages_to_fncall_messages,
convert_tool_call_to_string,
convert_tools_to_description,
)
FNCALL_TOOLS: list[ChatCompletionToolParam] = [
{
'type': 'function',
'function': {
'name': 'execute_bash',
'description': 'Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says "Command timed out. Sending SIGINT to the process", the assistant should retry running the command in the background.\n',
'parameters': {
'type': 'object',
'properties': {
'command': {
'type': 'string',
'description': 'The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. Can be `ctrl+c` to interrupt the currently running process.',
}
},
'required': ['command'],
},
},
},
{
'type': 'function',
'function': {
'name': 'finish',
'description': 'Finish the interaction when the task is complete OR if the assistant cannot proceed further with the task.',
},
},
{
'type': 'function',
'function': {
'name': 'str_replace_editor',
'description': 'Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with `<response clipped>`\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n',
'parameters': {
'type': 'object',
'properties': {
'command': {
'description': 'The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.',
'enum': [
'view',
'create',
'str_replace',
'insert',
'undo_edit',
],
'type': 'string',
},
'path': {
'description': 'Absolute path to file or directory, e.g. `/repo/file.py` or `/repo`.',
'type': 'string',
},
'file_text': {
'description': 'Required parameter of `create` command, with the content of the file to be created.',
'type': 'string',
},
'old_str': {
'description': 'Required parameter of `str_replace` command containing the string in `path` to replace.',
'type': 'string',
},
'new_str': {
'description': 'Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.',
'type': 'string',
},
'insert_line': {
'description': 'Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.',
'type': 'integer',
},
'view_range': {
'description': 'Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.',
'items': {'type': 'integer'},
'type': 'array',
},
},
'required': ['command', 'path'],
},
},
},
]
def test_convert_tools_to_description():
formatted_tools = convert_tools_to_description(FNCALL_TOOLS)
print(formatted_tools)
assert (
formatted_tools.strip()
== """---- BEGIN FUNCTION #1: execute_bash ----
Description: Execute a bash command in the terminal.
* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.
* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.
* Timeout: If a command execution result says "Command timed out. Sending SIGINT to the process", the assistant should retry running the command in the background.
Parameters:
(1) command (string, required): The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. Can be `ctrl+c` to interrupt the currently running process.
---- END FUNCTION #1 ----
---- BEGIN FUNCTION #2: finish ----
Description: Finish the interaction when the task is complete OR if the assistant cannot proceed further with the task.
No parameters are required for this function.
---- END FUNCTION #2 ----
---- BEGIN FUNCTION #3: str_replace_editor ----
Description: Custom editing tool for viewing, creating and editing files
* State is persistent across command calls and discussions with the user
* If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep
* The `create` command cannot be used if the specified `path` already exists as a file
* If a `command` generates a long output, it will be truncated and marked with `<response clipped>`
* The `undo_edit` command will revert the last edit made to the file at `path`
Notes for using the `str_replace` command:
* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!
* If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique
* The `new_str` parameter should contain the edited lines that should replace the `old_str`
Parameters:
(1) command (string, required): The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.
Allowed values: [`view`, `create`, `str_replace`, `insert`, `undo_edit`]
(2) path (string, required): Absolute path to file or directory, e.g. `/repo/file.py` or `/repo`.
(3) file_text (string, optional): Required parameter of `create` command, with the content of the file to be created.
(4) old_str (string, optional): Required parameter of `str_replace` command containing the string in `path` to replace.
(5) new_str (string, optional): Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.
(6) insert_line (integer, optional): Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.
(7) view_range (array, optional): Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.
---- END FUNCTION #3 ----""".strip()
)
FNCALL_MESSAGES = [
{
'content': [
{
'type': 'text',
'text': "You are a helpful assistant that can interact with a computer to solve tasks.\n<IMPORTANT>\n* If user provides a path, you should NOT assume it's relative to the current working directory. Instead, you should explore the file system to find the file before working on it.\n</IMPORTANT>\n\n",
'cache_control': {'type': 'ephemeral'},
}
],
'role': 'system',
},
{
'content': [
{
'type': 'text',
'text': "<uploaded_files>\n/workspace/astropy__astropy__5.1\n</uploaded_files>\nI've uploaded a python code repository in the directory astropy__astropy__5.1. LONG DESCRIPTION:\n\n",
}
],
'role': 'user',
},
{
'content': [
{
'type': 'text',
'text': "I'll help you implement the necessary changes to meet the requirements. Let's follow the steps:\n\n1. First, let's explore the repository structure:",
}
],
'role': 'assistant',
'tool_calls': [
{
'index': 1,
'function': {
'arguments': '{"command": "ls -la /workspace/astropy__astropy__5.1"}',
'name': 'execute_bash',
},
'id': 'toolu_01',
'type': 'function',
}
],
},
{
'content': [
{
'type': 'text',
'text': 'ls -la /workspace/astropy__astropy__5.1\r\nls: /workspace/astropy__astropy__5.1: Bad file descriptor\r\nlrwxrwxrwx 1 root root 8 Oct 28 21:58 /workspace/astropy__astropy__5.1 -> /testbed[Python Interpreter: /opt/miniconda3/envs/testbed/bin/python]\nroot@openhands-workspace:/workspace/astropy__astropy__5.1 # \n[Command finished with exit code 0]',
}
],
'role': 'tool',
'tool_call_id': 'toolu_01',
'name': 'execute_bash',
},
{
'content': [
{
'type': 'text',
'text': "I see there's a symlink. Let's explore the actual directory:",
}
],
'role': 'assistant',
'tool_calls': [
{
'index': 1,
'function': {
'arguments': '{"command": "ls -la /testbed"}',
'name': 'execute_bash',
},
'id': 'toolu_02',
'type': 'function',
}
],
},
{
'content': [
{
'type': 'text',
'text': 'SOME OBSERVATION',
}
],
'role': 'tool',
'tool_call_id': 'toolu_02',
'name': 'execute_bash',
},
{
'content': [
{
'type': 'text',
'text': "Let's look at the source code file mentioned in the PR description:",
}
],
'role': 'assistant',
'tool_calls': [
{
'index': 1,
'function': {
'arguments': '{"command": "view", "path": "/testbed/astropy/io/fits/card.py"}',
'name': 'str_replace_editor',
},
'id': 'toolu_03',
'type': 'function',
}
],
},
{
'content': [
{
'type': 'text',
'text': "Here's the result of running `cat -n` on /testbed/astropy/io/fits/card.py:\n 1\t# Licensed under a 3-clause BSD style license - see PYFITS.rst...VERY LONG TEXT",
}
],
'role': 'tool',
'tool_call_id': 'toolu_03',
'name': 'str_replace_editor',
},
]
NON_FNCALL_MESSAGES = [
{
'role': 'system',
'content': [
{
'type': 'text',
'text': 'You are a helpful assistant that can interact with a computer to solve tasks.\n<IMPORTANT>\n* If user provides a path, you should NOT assume it\'s relative to the current working directory. Instead, you should explore the file system to find the file before working on it.\n</IMPORTANT>\n\n\nYou have access to the following functions:\n\n---- BEGIN FUNCTION #1: execute_bash ----\nDescription: Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says "Command timed out. Sending SIGINT to the process", the assistant should retry running the command in the background.\n\nParameters:\n (1) command (string, required): The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. Can be `ctrl+c` to interrupt the currently running process.\n---- END FUNCTION #1 ----\n\n---- BEGIN FUNCTION #2: finish ----\nDescription: Finish the interaction when the task is complete OR if the assistant cannot proceed further with the task.\nNo parameters are required for this function.\n---- END FUNCTION #2 ----\n\n---- BEGIN FUNCTION #3: str_replace_editor ----\nDescription: Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with `<response clipped>`\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n\nParameters:\n (1) command (string, required): The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.\nAllowed values: [`view`, `create`, `str_replace`, `insert`, `undo_edit`]\n (2) path (string, required): Absolute path to file or directory, e.g. `/repo/file.py` or `/repo`.\n (3) file_text (string, optional): Required parameter of `create` command, with the content of the file to be created.\n (4) old_str (string, optional): Required parameter of `str_replace` command containing the string in `path` to replace.\n (5) new_str (string, optional): Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.\n (6) insert_line (integer, optional): Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.\n (7) view_range (array, optional): Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.\n---- END FUNCTION #3 ----\n\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<function=example_function_name>\n<parameter=example_parameter_1>value_1</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format, start with <function= and end with </function>\n- Required parameters MUST be specified\n- Only call one function at a time\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after.\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n',
'cache_control': {'type': 'ephemeral'},
}
],
},
{
'content': [
{
'type': 'text',
'text': IN_CONTEXT_LEARNING_EXAMPLE_PREFIX
+ "<uploaded_files>\n/workspace/astropy__astropy__5.1\n</uploaded_files>\nI've uploaded a python code repository in the directory astropy__astropy__5.1. LONG DESCRIPTION:\n\n"
+ IN_CONTEXT_LEARNING_EXAMPLE_SUFFIX,
}
],
'role': 'user',
},
{
'role': 'assistant',
'content': [
{
'type': 'text',
'text': "I'll help you implement the necessary changes to meet the requirements. Let's follow the steps:\n\n1. First, let's explore the repository structure:\n\n<function=execute_bash>\n<parameter=command>ls -la /workspace/astropy__astropy__5.1</parameter>\n</function>",
}
],
},
{
'role': 'user',
'content': [
{
'type': 'text',
'text': 'EXECUTION RESULT of [execute_bash]:\nls -la /workspace/astropy__astropy__5.1\r\nls: /workspace/astropy__astropy__5.1: Bad file descriptor\r\nlrwxrwxrwx 1 root root 8 Oct 28 21:58 /workspace/astropy__astropy__5.1 -> /testbed[Python Interpreter: /opt/miniconda3/envs/testbed/bin/python]\nroot@openhands-workspace:/workspace/astropy__astropy__5.1 # \n[Command finished with exit code 0]',
}
],
},
{
'role': 'assistant',
'content': [
{
'type': 'text',
'text': "I see there's a symlink. Let's explore the actual directory:\n\n<function=execute_bash>\n<parameter=command>ls -la /testbed</parameter>\n</function>",
}
],
},
{
'role': 'user',
'content': [
{
'type': 'text',
'text': 'EXECUTION RESULT of [execute_bash]:\nSOME OBSERVATION',
}
],
},
{
'role': 'assistant',
'content': [
{
'type': 'text',
'text': "Let's look at the source code file mentioned in the PR description:\n\n<function=str_replace_editor>\n<parameter=command>view</parameter>\n<parameter=path>/testbed/astropy/io/fits/card.py</parameter>\n</function>",
}
],
},
{
'role': 'user',
'content': [
{
'type': 'text',
'text': "EXECUTION RESULT of [str_replace_editor]:\nHere's the result of running `cat -n` on /testbed/astropy/io/fits/card.py:\n 1\t# Licensed under a 3-clause BSD style license - see PYFITS.rst...VERY LONG TEXT",
}
],
},
]
FNCALL_RESPONSE_MESSAGE = {
'content': [
{
'type': 'text',
'text': 'Let me search for the `_format_float` method mentioned in the PR description:',
}
],
'role': 'assistant',
'tool_calls': [
{
'index': 1,
'function': {
'arguments': '{"command": "grep -n \\"_format_float\\" /testbed/astropy/io/fits/card.py"}',
'name': 'execute_bash',
},
'id': 'toolu_04',
'type': 'function',
}
],
}
NON_FNCALL_RESPONSE_MESSAGE = {
'content': [
{
'type': 'text',
'text': 'Let me search for the `_format_float` method mentioned in the PR description:\n\n<function=execute_bash>\n<parameter=command>grep -n "_format_float" /testbed/astropy/io/fits/card.py</parameter>\n</function>',
}
],
'role': 'assistant',
}
@pytest.mark.parametrize(
'tool_calls, expected',
[
# Original test case
(
FNCALL_RESPONSE_MESSAGE['tool_calls'],
"""<function=execute_bash>
<parameter=command>grep -n "_format_float" /testbed/astropy/io/fits/card.py</parameter>
</function>""",
),
# Test case with multiple parameters
(
[
{
'index': 1,
'function': {
'arguments': '{"command": "view", "path": "/test/file.py", "view_range": [1, 10]}',
'name': 'str_replace_editor',
},
'id': 'test_id',
'type': 'function',
}
],
"""<function=str_replace_editor>
<parameter=command>view</parameter>
<parameter=path>/test/file.py</parameter>
<parameter=view_range>[1, 10]</parameter>
</function>""",
),
],
)
def test_convert_tool_call_to_string(tool_calls, expected):
assert len(tool_calls) == 1
converted = convert_tool_call_to_string(tool_calls[0])
print(converted)
assert converted == expected
def test_convert_fncall_messages_to_non_fncall_messages():
converted_non_fncall = convert_fncall_messages_to_non_fncall_messages(
FNCALL_MESSAGES, FNCALL_TOOLS
)
assert converted_non_fncall == NON_FNCALL_MESSAGES
def test_convert_non_fncall_messages_to_fncall_messages():
converted = convert_non_fncall_messages_to_fncall_messages(
NON_FNCALL_MESSAGES, FNCALL_TOOLS
)
print(json.dumps(converted, indent=2))
assert converted == FNCALL_MESSAGES
def test_two_way_conversion_nonfn_to_fn_to_nonfn():
non_fncall_copy = copy.deepcopy(NON_FNCALL_MESSAGES)
converted_fncall = convert_non_fncall_messages_to_fncall_messages(
NON_FNCALL_MESSAGES, FNCALL_TOOLS
)
assert (
non_fncall_copy == NON_FNCALL_MESSAGES
) # make sure original messages are not modified
assert converted_fncall == FNCALL_MESSAGES
fncall_copy = copy.deepcopy(FNCALL_MESSAGES)
converted_non_fncall = convert_fncall_messages_to_non_fncall_messages(
FNCALL_MESSAGES, FNCALL_TOOLS
)
assert (
fncall_copy == FNCALL_MESSAGES
) # make sure original messages are not modified
assert converted_non_fncall == NON_FNCALL_MESSAGES
def test_two_way_conversion_fn_to_nonfn_to_fn():
fncall_copy = copy.deepcopy(FNCALL_MESSAGES)
converted_non_fncall = convert_fncall_messages_to_non_fncall_messages(
FNCALL_MESSAGES, FNCALL_TOOLS
)
assert (
fncall_copy == FNCALL_MESSAGES
) # make sure original messages are not modified
assert converted_non_fncall == NON_FNCALL_MESSAGES
non_fncall_copy = copy.deepcopy(NON_FNCALL_MESSAGES)
converted_fncall = convert_non_fncall_messages_to_fncall_messages(
NON_FNCALL_MESSAGES, FNCALL_TOOLS
)
assert (
non_fncall_copy == NON_FNCALL_MESSAGES
) # make sure original messages are not modified
assert converted_fncall == FNCALL_MESSAGES
def test_infer_fncall_on_noncall_model():
messages_for_llm_inference = convert_fncall_messages_to_non_fncall_messages(
FNCALL_MESSAGES, FNCALL_TOOLS
)
assert messages_for_llm_inference == NON_FNCALL_MESSAGES
# Mock LLM inference
response_message_from_llm_inference = NON_FNCALL_RESPONSE_MESSAGE
    # Convert back to fncall messages to hand back to the agent,
    # so the agent stays model-agnostic
all_nonfncall_messages = NON_FNCALL_MESSAGES + [response_message_from_llm_inference]
converted_fncall_messages = convert_non_fncall_messages_to_fncall_messages(
all_nonfncall_messages, FNCALL_TOOLS
)
assert converted_fncall_messages == FNCALL_MESSAGES + [FNCALL_RESPONSE_MESSAGE]
assert converted_fncall_messages[-1] == FNCALL_RESPONSE_MESSAGE
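# --- Illustration (sketch only, not the actual OpenHands wrapper): the test
# above mirrors the intended inference flow. `completion_fn` stands in for
# any chat-completion callable (e.g. litellm.completion) serving a model
# without native function calling.
def fncall_completion(completion_fn, fncall_messages, tools, **kwargs):
    # Down-convert so the model sees tool definitions/calls as plain text.
    prompt_messages = convert_fncall_messages_to_non_fncall_messages(
        fncall_messages, tools
    )
    response = completion_fn(messages=prompt_messages, **kwargs)
    # OpenAI-style response shape assumed here.
    raw_message = response['choices'][0]['message']
    # Up-convert the reply so the agent only ever sees structured tool calls.
    converted = convert_non_fncall_messages_to_fncall_messages(
        prompt_messages + [raw_message], tools
    )
    return converted[-1]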
def test_convert_from_multiple_tool_calls_to_single_tool_call_messages():
# Test case with multiple tool calls in one message
input_messages = [
{
'role': 'assistant',
'content': 'Let me help you with that.',
'tool_calls': [
{
'id': 'call1',
'type': 'function',
'function': {'name': 'func1', 'arguments': '{}'},
},
{
'id': 'call2',
'type': 'function',
'function': {'name': 'func2', 'arguments': '{}'},
},
],
},
{
'role': 'tool',
'tool_call_id': 'call1',
'content': 'Result 1',
'name': 'func1',
},
{
'role': 'tool',
'tool_call_id': 'call2',
'content': 'Result 2',
'name': 'func2',
},
{
'role': 'assistant',
'content': 'Test again',
'tool_calls': [
{
'id': 'call3',
'type': 'function',
'function': {'name': 'func3', 'arguments': '{}'},
},
{
'id': 'call4',
'type': 'function',
'function': {'name': 'func4', 'arguments': '{}'},
},
],
},
{
'role': 'tool',
'tool_call_id': 'call3',
'content': 'Result 3',
'name': 'func3',
},
{
'role': 'tool',
'tool_call_id': 'call4',
'content': 'Result 4',
'name': 'func4',
},
]
expected_output = [
{
'role': 'assistant',
'content': 'Let me help you with that.',
'tool_calls': [
{
'id': 'call1',
'type': 'function',
'function': {'name': 'func1', 'arguments': '{}'},
}
],
},
{
'role': 'tool',
'tool_call_id': 'call1',
'content': 'Result 1',
'name': 'func1',
},
{
'role': 'assistant',
'content': '',
'tool_calls': [
{
'id': 'call2',
'type': 'function',
'function': {'name': 'func2', 'arguments': '{}'},
}
],
},
{
'role': 'tool',
'tool_call_id': 'call2',
'content': 'Result 2',
'name': 'func2',
},
{
'role': 'assistant',
'content': 'Test again',
'tool_calls': [
{
'id': 'call3',
'type': 'function',
'function': {'name': 'func3', 'arguments': '{}'},
}
],
},
{
'role': 'tool',
'tool_call_id': 'call3',
'content': 'Result 3',
'name': 'func3',
},
{
'role': 'assistant',
'content': '',
'tool_calls': [
{
'id': 'call4',
'type': 'function',
'function': {'name': 'func4', 'arguments': '{}'},
}
],
},
{
'role': 'tool',
'tool_call_id': 'call4',
'content': 'Result 4',
'name': 'func4',
},
]
result = convert_from_multiple_tool_calls_to_single_tool_call_messages(
input_messages
)
assert result == expected_output
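# --- Illustration (sketch only, not the library code): a simplified
# re-implementation of the behavior this test pins down. The first split
# keeps the original assistant content, later splits get empty content, and
# each tool response is re-attached right after its originating call.
def split_multi_tool_calls(messages):
    out = []
    responses = {
        m['tool_call_id']: m for m in messages if m.get('role') == 'tool'
    }
    for msg in messages:
        if msg.get('role') == 'tool':
            continue  # re-emitted right after its originating call below
        calls = msg.get('tool_calls') or []
        if len(calls) <= 1:
            out.append(msg)
            if calls:
                out.append(responses[calls[0]['id']])
            continue
        for i, call in enumerate(calls):
            out.append({
                'role': 'assistant',
                # only the first split keeps the original content
                'content': msg['content'] if i == 0 else '',
                'tool_calls': [call],
            })
            # a missing response raises KeyError here; the real converter
            # raises FunctionCallConversionError instead (see next test)
            out.append(responses[call['id']])
    return out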
def test_convert_from_multiple_tool_calls_to_single_tool_call_messages_incomplete():
    # Test case with multiple tool calls but a missing tool response
input_messages = [
{
'role': 'assistant',
'content': 'Let me help you with that.',
'tool_calls': [
{
'id': 'call1',
'type': 'function',
'function': {'name': 'func1', 'arguments': '{}'},
},
{
'id': 'call2',
'type': 'function',
'function': {'name': 'func2', 'arguments': '{}'},
},
],
},
{
'role': 'tool',
'tool_call_id': 'call1',
'content': 'Result 1',
'name': 'func1',
},
]
with pytest.raises(FunctionCallConversionError):
convert_from_multiple_tool_calls_to_single_tool_call_messages(input_messages)
def test_convert_from_multiple_tool_calls_no_changes_needed():
# Test case where no conversion is needed (single tool call)
input_messages = [
{
'role': 'assistant',
'content': 'Let me help you with that.',
'tool_calls': [
{
'id': 'call1',
'type': 'function',
'function': {'name': 'func1', 'arguments': '{}'},
}
],
},
{
'role': 'tool',
'tool_call_id': 'call1',
'content': 'Result 1',
'name': 'func1',
},
]
result = convert_from_multiple_tool_calls_to_single_tool_call_messages(
input_messages
)
assert result == input_messages
def test_convert_from_multiple_tool_calls_no_tool_calls():
# Test case with no tool calls
input_messages = [
{'role': 'user', 'content': 'Hello'},
{'role': 'assistant', 'content': 'Hi there!'},
]
result = convert_from_multiple_tool_calls_to_single_tool_call_messages(
input_messages
)
assert result == input_messages

View File

@@ -1,32 +1,35 @@
from unittest.mock import Mock, patch
from unittest.mock import Mock
import pytest
from litellm import ModelResponse
from openhands.agenthub.codeact_agent.codeact_agent import CodeActAgent
from openhands.core.config import AgentConfig, LLMConfig
from openhands.events.action import CmdRunAction, MessageAction
from openhands.events.observation import CmdOutputObservation
from openhands.events.action import MessageAction
from openhands.llm.llm import LLM
@pytest.fixture
def mock_llm():
llm = Mock(spec=LLM)
llm.config = LLMConfig(model='claude-3-5-sonnet-20241022', caching_prompt=True)
llm.is_caching_prompt_active.return_value = True
llm = LLM(
LLMConfig(
model='claude-3-5-sonnet-20241022',
api_key='fake',
caching_prompt=True,
)
)
return llm
@pytest.fixture(params=[False, True])
def codeact_agent(mock_llm, request):
@pytest.fixture
def codeact_agent(mock_llm):
config = AgentConfig()
config.function_calling = request.param
return CodeActAgent(mock_llm, config)
def response_mock(content: str):
def response_mock(content: str, tool_call_id: str):
class MockModelResponse:
def __init__(self, content):
def __init__(self, content, tool_call_id):
self.choices = [
{
'message': {
@@ -34,6 +37,7 @@ def response_mock(content: str):
'tool_calls': [
{
'function': {
'id': tool_call_id,
'name': 'execute_bash',
'arguments': '{}',
}
@@ -46,10 +50,10 @@ def response_mock(content: str):
def model_dump(self):
return {'choices': self.choices}
return MockModelResponse(content)
return ModelResponse(**MockModelResponse(content, tool_call_id).model_dump())
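# --- Illustration: the refactor above swaps the hand-rolled mock for a real
# litellm ModelResponse. A minimal response can be built directly (a sketch;
# the dict follows the OpenAI chat-completion shape, just as the updated
# prompt-caching test later in this diff does):
from litellm import ModelResponse

resp = ModelResponse(
    choices=[{'message': {'role': 'assistant', 'content': 'Hello!'}}]
)
assert resp.choices[0].message.content == 'Hello!'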
def test_get_messages_with_reminder(codeact_agent: CodeActAgent):
def test_get_messages(codeact_agent: CodeActAgent):
# Add some events to history
history = list()
message_action_1 = MessageAction('Initial user message')
@@ -78,13 +82,9 @@ def test_get_messages_with_reminder(codeact_agent: CodeActAgent):
) # System, initial user + user message, agent message, last user message
assert messages[0].content[0].cache_prompt # system message
assert messages[1].role == 'user'
if not codeact_agent.config.function_calling:
assert messages[1].content[0].text.endswith("LET'S START!")
assert messages[1].content[1].text.endswith('Initial user message')
else:
assert messages[1].content[0].text.endswith('Initial user message')
assert messages[1].content[0].text.endswith('Initial user message')
# we add cache breakpoint to the last 3 user messages
assert messages[1].content[-1].cache_prompt
assert messages[1].content[0].cache_prompt
assert messages[3].role == 'user'
assert messages[3].content[0].text == ('Hello, agent!')
@@ -95,14 +95,6 @@ def test_get_messages_with_reminder(codeact_agent: CodeActAgent):
assert messages[5].role == 'user'
assert messages[5].content[0].text.startswith('Laaaaaaaast!')
assert messages[5].content[0].cache_prompt
if not codeact_agent.config.function_calling:
assert (
messages[5]
.content[1]
.text.endswith(
'ENVIRONMENT REMINDER: You have 5 turns left to complete the task. When finished reply with <finish></finish>.'
)
)
def test_get_messages_prompt_caching(codeact_agent: CodeActAgent):
@@ -132,114 +124,20 @@ def test_get_messages_prompt_caching(codeact_agent: CodeActAgent):
) # Including the initial system+user + 2 last user message
# Verify that these are indeed the last two user messages (from start)
if not codeact_agent.config.function_calling:
assert (
cached_user_messages[0].content[0].text.startswith('A chat between')
) # system message
assert cached_user_messages[0].content[0].text.startswith('You are OpenHands agent')
assert cached_user_messages[2].content[0].text.startswith('User message 1')
assert cached_user_messages[3].content[0].text.startswith('User message 1')
def test_get_messages_with_cmd_action(codeact_agent: CodeActAgent):
if codeact_agent.config.function_calling:
pytest.skip('Skipping this test for function calling')
history = list()
# Add a mix of actions and observations
message_action_1 = MessageAction(
"Let's list the contents of the current directory."
)
message_action_1._source = 'user'
history.append(message_action_1)
cmd_action_1 = CmdRunAction('ls -l', thought='List files in current directory')
cmd_action_1._source = 'agent'
cmd_action_1._id = 'cmd_1'
history.append(cmd_action_1)
cmd_observation_1 = CmdOutputObservation(
content='total 0\n-rw-r--r-- 1 user group 0 Jan 1 00:00 file1.txt\n-rw-r--r-- 1 user group 0 Jan 1 00:00 file2.txt',
command_id=cmd_action_1._id,
command='ls -l',
exit_code=0,
)
cmd_observation_1._source = 'user'
history.append(cmd_observation_1)
message_action_2 = MessageAction("Now, let's create a new directory.")
message_action_2._source = 'agent'
history.append(message_action_2)
cmd_action_2 = CmdRunAction('mkdir new_directory', thought='Create a new directory')
cmd_action_2._source = 'agent'
cmd_action_2._id = 'cmd_2'
history.append(cmd_action_2)
cmd_observation_2 = CmdOutputObservation(
content='',
command_id=cmd_action_2._id,
command='mkdir new_directory',
exit_code=0,
)
cmd_observation_2._source = 'user'
history.append(cmd_observation_2)
codeact_agent.reset()
messages = codeact_agent._get_messages(
Mock(history=history, max_iterations=5, iteration=0)
)
# Assert the presence of key elements in the messages
assert (
messages[1]
.content[-1]
.text.startswith("Let's list the contents of the current directory.")
) # user, included in the initial message
if not codeact_agent.config.function_calling:
assert any(
'List files in current directory\n<execute_bash>\nls -l\n</execute_bash>'
in msg.content[0].text
for msg in messages
) # agent
assert any(
'total 0\n-rw-r--r-- 1 user group 0 Jan 1 00:00 file1.txt\n-rw-r--r-- 1 user group 0 Jan 1 00:00 file2.txt'
in msg.content[0].text
for msg in messages
) # user, observation
assert any(
"Now, let's create a new directory." in msg.content[0].text for msg in messages
) # agent
if not codeact_agent.config.function_calling:
assert messages[4].content[1].text.startswith('Create a new directory') # agent
assert any(
'finished with exit code 0' in msg.content[0].text for msg in messages
) # user, observation
assert (
messages[5].content[0].text.startswith('OBSERVATION:\n\n')
) # user, observation
# prompt cache is added to the system message
assert messages[0].content[0].cache_prompt
# and the first initial user message
assert messages[1].content[-1].cache_prompt
# and to the last two user messages
assert messages[3].content[0].cache_prompt
assert messages[5].content[0].cache_prompt
# reminder is added to the last user message
if not codeact_agent.config.function_calling:
assert 'ENVIRONMENT REMINDER: You have 5 turns' in messages[5].content[1].text
def test_prompt_caching_headers(codeact_agent: CodeActAgent):
history = list()
if codeact_agent.config.function_calling:
pytest.skip('Skipping this test for function calling')
# Setup
history.append(MessageAction('Hello, agent!'))
history.append(MessageAction('Hello, user!'))
msg1 = MessageAction('Hello, agent!')
msg1._source = 'user'
history.append(msg1)
msg2 = MessageAction('Hello, user!')
msg2._source = 'agent'
history.append(msg2)
mock_state = Mock()
mock_state.history = history
@@ -253,23 +151,12 @@ def test_prompt_caching_headers(codeact_agent: CodeActAgent):
assert 'extra_headers' in kwargs
assert 'anthropic-beta' in kwargs['extra_headers']
assert kwargs['extra_headers']['anthropic-beta'] == 'prompt-caching-2024-07-31'
# Create a mock response with the expected structure
mock_response = Mock()
mock_response.choices = [Mock()]
mock_response.choices[0].message = Mock()
mock_response.choices[0].message.content = 'Hello! How can I assist you today?'
return mock_response
return ModelResponse(
choices=[{'message': {'content': 'Hello! How can I assist you today?'}}]
)
# Use patch to replace litellm_completion with our check_headers function
with patch('openhands.llm.llm.litellm_completion', side_effect=check_headers):
# Also patch the action parser to return a MessageAction
with patch.object(
codeact_agent.action_parser,
'parse',
return_value=MessageAction('Hello! How can I assist you today?'),
):
# Act
result = codeact_agent.step(mock_state)
codeact_agent.llm._completion_unwrapped = check_headers
result = codeact_agent.step(mock_state)
# Assert
assert isinstance(result, MessageAction)

View File

@@ -12,44 +12,14 @@ from openhands.utils.prompt import PromptManager
def prompt_dir(tmp_path):
# Copy contents from "openhands/agenthub/codeact_agent" to the temp directory
shutil.copytree(
'openhands/agenthub/codeact_agent/prompts/default', tmp_path, dirs_exist_ok=True
'openhands/agenthub/codeact_agent/prompts', tmp_path, dirs_exist_ok=True
)
# Return the temporary directory path
return tmp_path
SAMPLE_AGENT_SKILLS_DOCS = """Sample agent skills documentation"""
@pytest.fixture
def agent_skills_docs():
return SAMPLE_AGENT_SKILLS_DOCS
def test_prompt_manager_without_microagent(prompt_dir, agent_skills_docs):
manager = PromptManager(
prompt_dir, microagent_dir='', agent_skills_docs=agent_skills_docs
)
assert manager.prompt_dir == prompt_dir
assert manager.agent_skills_docs == agent_skills_docs
assert len(manager.microagents) == 0
assert isinstance(manager.get_system_message(), str)
assert (
"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed answers to the user's questions."
in manager.get_system_message()
)
assert SAMPLE_AGENT_SKILLS_DOCS in manager.get_system_message()
assert isinstance(manager.get_example_user_message(), str)
assert '--- BEGIN OF GUIDELINE ---' not in manager.get_example_user_message()
assert '--- END OF GUIDELINE ---' not in manager.get_example_user_message()
assert "NOW, LET'S START!" in manager.get_example_user_message()
assert 'microagent' not in manager.get_example_user_message()
def test_prompt_manager_with_microagent(prompt_dir, agent_skills_docs):
def test_prompt_manager_with_microagent(prompt_dir):
microagent_name = 'test_microagent'
microagent_content = """
---
@@ -71,19 +41,16 @@ only respond with a message telling them how smart they are
manager = PromptManager(
prompt_dir=prompt_dir,
microagent_dir=os.path.join(prompt_dir, 'micro'),
agent_skills_docs=agent_skills_docs,
)
assert manager.prompt_dir == prompt_dir
assert manager.agent_skills_docs == agent_skills_docs
assert len(manager.microagents) == 1
assert isinstance(manager.get_system_message(), str)
assert (
"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed answers to the user's questions."
'You are OpenHands agent, a helpful AI assistant that can interact with a computer to solve tasks.'
in manager.get_system_message()
)
assert SAMPLE_AGENT_SKILLS_DOCS in manager.get_system_message()
assert isinstance(manager.get_example_user_message(), str)
@@ -97,23 +64,21 @@ only respond with a message telling them how smart they are
os.remove(os.path.join(prompt_dir, 'micro', f'{microagent_name}.md'))
def test_prompt_manager_file_not_found(prompt_dir, agent_skills_docs):
def test_prompt_manager_file_not_found(prompt_dir):
with pytest.raises(FileNotFoundError):
MicroAgent(os.path.join(prompt_dir, 'micro', 'non_existent_microagent.md'))
def test_prompt_manager_template_rendering(prompt_dir, agent_skills_docs):
def test_prompt_manager_template_rendering(prompt_dir):
# Create temporary template files
with open(os.path.join(prompt_dir, 'system_prompt.j2'), 'w') as f:
f.write('System prompt: {{ agent_skills_docs }}')
f.write('System prompt: bar')
with open(os.path.join(prompt_dir, 'user_prompt.j2'), 'w') as f:
f.write('User prompt: foo')
manager = PromptManager(
prompt_dir, microagent_dir='', agent_skills_docs=agent_skills_docs
)
manager = PromptManager(prompt_dir, microagent_dir='')
assert manager.get_system_message() == f'System prompt: {agent_skills_docs}'
assert manager.get_system_message() == 'System prompt: bar'
assert manager.get_example_user_message() == 'User prompt: foo'
# Clean up temporary files
@@ -121,7 +86,7 @@ def test_prompt_manager_template_rendering(prompt_dir, agent_skills_docs):
os.remove(os.path.join(prompt_dir, 'user_prompt.j2'))
def test_prompt_manager_disabled_microagents(prompt_dir, agent_skills_docs):
def test_prompt_manager_disabled_microagents(prompt_dir):
# Create test microagent files
microagent1_name = 'test_microagent1'
microagent2_name = 'test_microagent2'
@@ -157,7 +122,6 @@ Test microagent 2 content
manager = PromptManager(
prompt_dir=prompt_dir,
microagent_dir=os.path.join(prompt_dir, 'micro'),
agent_skills_docs=agent_skills_docs,
disabled_microagents=['Test Microagent 1'],
)
@@ -169,7 +133,6 @@ Test microagent 2 content
manager = PromptManager(
prompt_dir=prompt_dir,
microagent_dir=os.path.join(prompt_dir, 'micro'),
agent_skills_docs=agent_skills_docs,
)
assert len(manager.microagents) == 2
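# --- Illustration: with agent_skills_docs removed from the constructor,
# instantiating the manager now needs only the template directory and an
# optional microagent directory (paths here are illustrative):
from openhands.utils.prompt import PromptManager

manager = PromptManager(
    prompt_dir='openhands/agenthub/codeact_agent/prompts',
    microagent_dir='',
)
print(manager.get_system_message())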