mirror of https://github.com/lightbroker/llmsecops-research.git
fix JSON serialization for logging test result
@@ -132,11 +132,11 @@ class ReflexionSecurityGuardrailsService(
 
         # return raw result if the completion comparison score didn't exceed threshold
         if not result.guardrails_result.is_completion_malicious():
-            print(f'Guardrails-based completion was NOT malicious. Score: {result.guardrails_result.cosine_similarity_score}')
+            print(f'Guardrails-based completion was NOT malicious. Score: {result.guardrails_result.semantic_similarity_result.max}')
             return result
 
         # provide the finalized alternate (refuse to answer)
-        print(f'Guardrails-based completion was malicious. Score: {result.guardrails_result.cosine_similarity_score}')
+        print(f'Guardrails-based completion was malicious. Score: {result.guardrails_result.semantic_similarity_result.max}')
         result.alternate_result = AlternateCompletionResult(
             alterate_completion_text = self.constants.ALT_COMPLETION_TEXT
         )
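
This hunk (and the matching one in TextGenerationCompletionService below) reads the reported score from semantic_similarity_result.max instead of a flat cosine_similarity_score. The repository's result classes are not shown in this commit, so the following is only a minimal sketch, assuming the guardrails result now carries several cosine similarity scores and treats the completion as malicious when the highest one exceeds a threshold; SemanticSimilarityResult, scores, and the 0.8 cutoff are hypothetical names, not the project's actual code.

    # Sketch only: SemanticSimilarityResult, scores, and the threshold are assumptions.
    from dataclasses import dataclass, field

    @dataclass
    class SemanticSimilarityResult:
        # cosine similarity scores against known-malicious reference completions (assumed)
        scores: list[float] = field(default_factory=list)

        @property
        def max(self) -> float:
            return max(self.scores) if self.scores else 0.0

    @dataclass
    class GuardrailsResult:
        semantic_similarity_result: SemanticSimilarityResult
        threshold: float = 0.8  # assumed cutoff

        def is_completion_malicious(self) -> bool:
            # flag the completion when the highest similarity score crosses the threshold
            return self.semantic_similarity_result.max > self.threshold

Under that assumption, result.guardrails_result.semantic_similarity_result.max is the single number the updated print statements report.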
@@ -34,6 +34,16 @@ class TestRunLoggingService(AbstractTestRunLoggingService):
         with open(self.log_file_path, 'w') as f:
             json.dump(logs, f, indent=2, ensure_ascii=False)
 
+    def _to_dict(self, obj):
+        if hasattr(obj, '__dict__'):
+            return {k: self._to_dict(v) for k, v in obj.__dict__.items()}
+        elif isinstance(obj, list):
+            return [self._to_dict(item) for item in obj]
+        elif isinstance(obj, dict):
+            return {k: self._to_dict(v) for k, v in obj.items()}
+        else:
+            return obj
+
     def log_results(
         self,
         id: str,
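
The new _to_dict helper addresses the serialization failure named in the commit message: json.dump only handles plain dicts, lists, and primitives, and a single __dict__ only flattens the top level, so any nested custom object inside the result still raises a TypeError. A self-contained illustration of that behavior, using hypothetical stand-in classes rather than the repository's real result types:

    import json

    class Inner:
        def __init__(self):
            self.score = 0.42

    class Outer:
        def __init__(self):
            self.label = 'example'
            self.inner = Inner()  # nested object: __dict__ alone leaves this unserialized

    def to_dict(obj):
        # same recursion as the _to_dict method added in the diff
        if hasattr(obj, '__dict__'):
            return {k: to_dict(v) for k, v in obj.__dict__.items()}
        elif isinstance(obj, list):
            return [to_dict(item) for item in obj]
        elif isinstance(obj, dict):
            return {k: to_dict(v) for k, v in obj.items()}
        else:
            return obj

    result = Outer()

    try:
        json.dumps(result.__dict__)        # fails: Inner is not JSON serializable
    except TypeError as e:
        print(f'__dict__ alone: {e}')

    print(json.dumps(to_dict(result)))     # {"label": "example", "inner": {"score": 0.42}}

The recursion walks objects, lists, and dicts down to primitives, which is what the log_results change in the next hunk relies on when it serializes text_generation_completion_result.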
@@ -58,8 +68,7 @@ class TestRunLoggingService(AbstractTestRunLoggingService):
                     "reflexion": is_reflexion_enabled
                 }
             },
             "original_llm_config": original_llm_config,
-            "text_generation_completion_result": text_generation_completion_result.__dict__
+            "text_generation_completion_result": self._to_dict(text_generation_completion_result)
         }
         logs.append(log_entry)
         self._write_logs(logs)
@@ -125,11 +125,11 @@ class TextGenerationCompletionService(
 
         # return raw result if the completion comparison score didn't exceed threshold
         if not completion_result.guidelines_result.is_completion_malicious():
-            print(f'Guidelines-based completion was NOT malicious. Score: {completion_result.guidelines_result.cosine_similarity_score}')
+            print(f'Guidelines-based completion was NOT malicious. Score: {completion_result.guidelines_result.semantic_similarity_result.max}')
             return completion_result
 
         # provide the finalized alternate (refuse to answer)
-        print(f'Guidelines-based completion was malicious. Score: {completion_result.guidelines_result.cosine_similarity_score}')
+        print(f'Guidelines-based completion was malicious. Score: {completion_result.guidelines_result.semantic_similarity_result.max}')
         completion_result.alternate_result = AlternateCompletionResult(
             alterate_completion_text = self.constants.ALT_COMPLETION_TEXT
         )