fix(agent/profile_generator): Improve robustness by leveraging create_chat_completion's parse handling

This commit is contained in:
Reinier van der Leer
2024-02-15 11:48:07 +01:00
parent 52b93dd84e
commit a9b7b175ff

View File

@@ -238,18 +238,14 @@ async def generate_agent_profile_for_task(
     prompt = agent_profile_generator.build_prompt(task)
     # Call LLM with the string as user input
-    output = (
-        await llm_provider.create_chat_completion(
-            prompt.messages,
-            model_name=app_config.smart_llm,
-            functions=prompt.functions,
-        )
-    ).response
+    output = await llm_provider.create_chat_completion(
+        prompt.messages,
+        model_name=app_config.smart_llm,
+        functions=prompt.functions,
+        completion_parser=agent_profile_generator.parse_response_content,
+    )
     # Debug LLM Output
-    logger.debug(f"AI Config Generator Raw Output: {output}")
-    # Parse the output
-    ai_profile, ai_directives = agent_profile_generator.parse_response_content(output)
-    return ai_profile, ai_directives
+    logger.debug(f"AI Config Generator Raw Output: {output.response}")
+    return output.parsed_result