import json
from typing import List, Dict

from jinja2 import Environment, BaseLoader

from opendevin.agent import Agent
from opendevin.llm.llm import LLM
from opendevin.state import State
from opendevin.action import Action, action_from_dict
from opendevin.exceptions import LLMOutputError

from .instructions import instructions
from .registry import all_microagents


def parse_response(orig_response: str) -> Action:
    """Extract the JSON object from the LLM's raw response and turn it into an Action."""
    json_start = orig_response.find('{')
    json_end = orig_response.rfind('}') + 1
    response = orig_response[json_start:json_end]
    try:
        action_dict = json.loads(response)
    except json.JSONDecodeError as e:
        raise LLMOutputError(
            'Invalid JSON in response. Please make sure the response is a valid JSON object'
        ) from e
    action = action_from_dict(action_dict)
    return action
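

# Illustrative example (not part of the original module): parse_response tolerates prose
# around the JSON payload by slicing from the first '{' to the last '}'. Assuming the
# {'action': ..., 'args': ...} schema expected by action_from_dict elsewhere in OpenDevin,
# a call might look like:
#
#     resp = 'Sure, finishing up now. {"action": "finish", "args": {}}'
#     action = parse_response(resp)  # -> the corresponding Action instance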


def my_encoder(obj):
    """
    Encodes objects as dictionaries

    Parameters:
    - obj (Object): An object that will be converted

    Returns:
    - dict: If the object can be converted, it is returned in dict format
    """
    if hasattr(obj, 'to_dict'):
        return obj.to_dict()
    # Objects without a to_dict() method fall through here and return None,
    # which json.dumps serializes as null.


def to_json(obj, **kwargs):
    """
    Serialize an object to str format
    """
    return json.dumps(obj, default=my_encoder, **kwargs)
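

# Illustrative usage (hypothetical, not part of the original module): any object exposing
# a to_dict() method can be serialized through to_json; keyword arguments are forwarded
# to json.dumps.
#
#     class Point:
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#         def to_dict(self):
#             return {'x': self.x, 'y': self.y}
#
#     to_json(Point(1, 2), indent=2)
#     # '{\n  "x": 1,\n  "y": 2\n}'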


class MicroAgent(Agent):
    """Base class for prompt-driven micro agents.

    Subclasses provide a Jinja2 `prompt` template and an `agent_definition` dict
    (at minimum a 'name'); every other registered micro agent is exposed to the
    prompt as a potential delegate.
    """

    prompt = ''
    agent_definition: Dict = {}

    def __init__(self, llm: LLM):
        super().__init__(llm)
        if 'name' not in self.agent_definition:
            raise ValueError('Agent definition must contain a name')
        self.name = self.agent_definition['name']
        self.description = self.agent_definition.get('description', '')
        self.inputs = self.agent_definition.get('inputs', [])
        self.outputs = self.agent_definition.get('outputs', [])
        self.examples = self.agent_definition.get('examples', [])
        self.prompt_template = Environment(loader=BaseLoader).from_string(self.prompt)
        # Every registered micro agent except this one is available as a delegate.
        self.delegates = all_microagents.copy()
        del self.delegates[self.name]
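
    # Illustrative (hypothetical) agent_definition a subclass might supply; only 'name'
    # is required, the remaining keys default to '' / [] as in __init__ above:
    #
    #     agent_definition = {
    #         'name': 'MathAgent',
    #         'description': 'Solves a math problem',
    #         'inputs': ['task'],
    #         'outputs': ['answer'],
    #         'examples': [],
    #     }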

    def step(self, state: State) -> Action:
        # Render the prompt with the current state, the shared instructions, and the
        # available delegates, then ask the LLM for the next action as a JSON object.
        prompt = self.prompt_template.render(
            state=state,
            instructions=instructions,
            to_json=to_json,
            delegates=self.delegates)
        messages = [{'content': prompt, 'role': 'user'}]
        resp = self.llm.completion(messages=messages)
        action_resp = resp['choices'][0]['message']['content']
        state.num_of_chars += len(prompt) + len(action_resp)
        action = parse_response(action_resp)
        return action

    def search_memory(self, query: str) -> List[str]:
        # Memory search is not implemented for micro agents.
        return []
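

# Minimal sketch (an assumption, not the project's actual wiring) of how a concrete
# micro agent could be declared on top of MicroAgent. The template text and registration
# call are hypothetical; only the MicroAgent attributes shown above come from this module:
#
#     class MathAgent(MicroAgent):
#         prompt = (
#             '{{ instructions }}\n'
#             'Solve the task described in the state below and respond with one JSON action.\n'
#             '{{ to_json(state) }}'
#         )
#         agent_definition = {'name': 'MathAgent', 'description': 'Solves a math problem'}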