Mirror of https://github.com/All-Hands-AI/OpenHands.git (synced 2026-01-10 07:18:10 -05:00)
Config objects as Pydantic BaseModels (#6176)
Co-authored-by: Calvin Smith <calvin@all-hands.dev>
Co-authored-by: Graham Neubig <neubig@gmail.com>
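The hunks below remove hand-written model_dump / model_dump_json overrides whose only job was to redact LLM credentials before evaluation output was written. As background on why Pydantic BaseModel configs can make such overrides unnecessary, here is a minimal sketch using SecretStr; the class and field names are hypothetical, this is not the repository's actual LLMConfig, and the commit itself may handle secrets differently.

from pydantic import BaseModel, SecretStr


class LLMConfigSketch(BaseModel):
    # Hypothetical stand-in for an LLM config; not the repository's LLMConfig.
    model: str = 'gpt-4o'
    api_key: SecretStr | None = None


cfg = LLMConfigSketch(api_key=SecretStr('sk-not-a-real-key'))
print(cfg.model_dump_json())  # {"model":"gpt-4o","api_key":"**********"}
# The raw value stays available only through an explicit call:
print(cfg.api_key.get_secret_value())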
@@ -52,30 +52,6 @@ class EvalMetadata(BaseModel):
     details: dict[str, Any] | None = None
     condenser_config: CondenserConfig | None = None
 
-    def model_dump(self, *args, **kwargs):
-        dumped_dict = super().model_dump(*args, **kwargs)
-        # avoid leaking sensitive information
-        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
-        if hasattr(self.condenser_config, 'llm_config'):
-            dumped_dict['condenser_config']['llm_config'] = (
-                self.condenser_config.llm_config.to_safe_dict()
-            )
-
-        return dumped_dict
-
-    def model_dump_json(self, *args, **kwargs):
-        dumped = super().model_dump_json(*args, **kwargs)
-        dumped_dict = json.loads(dumped)
-        # avoid leaking sensitive information
-        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
-        if hasattr(self.condenser_config, 'llm_config'):
-            dumped_dict['condenser_config']['llm_config'] = (
-                self.condenser_config.llm_config.to_safe_dict()
-            )
-
-        logger.debug(f'Dumped metadata: {dumped_dict}')
-        return json.dumps(dumped_dict)
-
 
 class EvalOutput(BaseModel):
     # NOTE: User-specified
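The removed EvalMetadata methods follow one pattern: call the parent's dump, then overwrite the keys that may carry credentials with to_safe_dict() output. Pydantic v2 can also attach redaction to a single field with a field_serializer; the sketch below shows that alternative under assumed names (MetadataSketch, a plain dict llm_config), not what this commit actually does.

from typing import Any

from pydantic import BaseModel, field_serializer


class MetadataSketch(BaseModel):
    # Illustrative stand-in for EvalMetadata with a dict-valued config field.
    llm_config: dict[str, Any]

    @field_serializer('llm_config')
    def _redact_llm_config(self, value: dict[str, Any]) -> dict[str, Any]:
        # Drop obviously sensitive keys before the field is serialized.
        return {k: v for k, v in value.items() if k != 'api_key'}


m = MetadataSketch(llm_config={'model': 'gpt-4o', 'api_key': 'sk-secret'})
print(m.model_dump())  # {'llm_config': {'model': 'gpt-4o'}}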
@@ -98,23 +74,6 @@ class EvalOutput(BaseModel):
     # Optionally save the input test instance
     instance: dict[str, Any] | None = None
 
-    def model_dump(self, *args, **kwargs):
-        dumped_dict = super().model_dump(*args, **kwargs)
-        # Remove None values
-        dumped_dict = {k: v for k, v in dumped_dict.items() if v is not None}
-        # Apply custom serialization for metadata (to avoid leaking sensitive information)
-        if self.metadata is not None:
-            dumped_dict['metadata'] = self.metadata.model_dump()
-        return dumped_dict
-
-    def model_dump_json(self, *args, **kwargs):
-        dumped = super().model_dump_json(*args, **kwargs)
-        dumped_dict = json.loads(dumped)
-        # Apply custom serialization for metadata (to avoid leaking sensitive information)
-        if 'metadata' in dumped_dict:
-            dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json())
-        return json.dumps(dumped_dict)
-
 
 class EvalException(Exception):
     pass
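The removed EvalOutput.model_dump() also filtered out None values by hand. On a plain BaseModel the built-in exclude_none option covers that case; whether the callers touched by this commit pass it is not visible in these hunks. A small sketch with a hypothetical RowSketch model:

from pydantic import BaseModel


class RowSketch(BaseModel):
    # Illustrative only; not the real EvalOutput.
    instance_id: str
    error: str | None = None


print(RowSketch(instance_id='demo-1').model_dump(exclude_none=True))
# {'instance_id': 'demo-1'}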
@@ -314,7 +273,7 @@ def update_progress(
     logger.info(
         f'Finished evaluation for instance {result.instance_id}: {str(result.test_result)[:300]}...\n'
     )
-    output_fp.write(json.dumps(result.model_dump()) + '\n')
+    output_fp.write(result.model_dump_json() + '\n')
     output_fp.flush()
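The one functional change in update_progress swaps json.dumps(result.model_dump()) for result.model_dump_json(). The sketch below illustrates the practical difference on a hypothetical model with a datetime field: model_dump_json() uses Pydantic's JSON encoders, while json.dumps() on the plain Python dict would fail on the datetime value.

import json
from datetime import datetime

from pydantic import BaseModel


class OutputSketch(BaseModel):
    # Hypothetical stand-in; not the real EvalOutput.
    instance_id: str
    finished_at: datetime


out = OutputSketch(instance_id='demo-1', finished_at=datetime(2025, 1, 1))
print(out.model_dump_json())  # finished_at rendered as an ISO-8601 string
# json.dumps(out.model_dump()) would raise TypeError: datetime is not JSON serializable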