Mirror of https://github.com/Shiva108/ai-llm-red-team-handbook.git
Synced 2026-05-15 05:00:57 +02:00

Commit b3d3bac51f:
- Extracted all code examples from handbook chapters
- Organized into 15 attack categories
- Created shared utilities (api_client, validators, logging, constants)
- Added workflow orchestration scripts
- Implemented install.sh for easy setup
- Renamed all scripts to descriptive functional names
- Added comprehensive README and documentation
- Included pytest test suite and configuration
42 lines | 1.1 KiB | Python
#!/usr/bin/env python3
"""
Test 2: Data Poisoning Simulation

Source: Chapter_13_Data_Provenance_and_Supply_Chain_Security
Category: supply_chain
"""

import argparse
import sys

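
# --- Illustrative helpers (assumption) ----------------------------------
# The handbook excerpt calls inject_poisoned_examples() and
# data_validation_pipeline() without defining them here. The minimal
# stand-ins below are a sketch so the script runs end to end, assuming the
# dataset is a list of (text, label) pairs with binary 0/1 labels; a real
# toolkit would swap in its own poisoning and validation utilities.
import random
from dataclasses import dataclass


@dataclass
class ValidationResults:
    """Summary object returned by the (assumed) validation pipeline."""
    suspicious_count: int


def inject_poisoned_examples(clean_dataset, poisoning_ratio=0.01,
                             poison_type='label_flip'):
    """Flip the labels of a random fraction of (text, label) examples."""
    poisoned = list(clean_dataset)
    if not poisoned:
        return poisoned
    n_poison = max(1, int(len(poisoned) * poisoning_ratio))
    for idx in random.sample(range(len(poisoned)), n_poison):
        text, label = poisoned[idx]
        if poison_type == 'label_flip':
            poisoned[idx] = (text, 1 - label)  # assumes binary labels
    return poisoned


def data_validation_pipeline(dataset):
    """Toy validation stage: flag examples whose label disagrees with a
    simple keyword heuristic (placeholder for real provenance checks)."""
    suspicious = 0
    for text, label in dataset:
        expected = 1 if "positive" in text.lower() else 0
        if expected != label:
            suspicious += 1
    return ValidationResults(suspicious_count=suspicious)
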

def test_data_poisoning_detection(clean_dataset, poisoning_ratio=0.01):
    """
    Inject poisoned examples into a clean dataset and check whether the
    data validation pipeline flags them.
    """
    # Poison a small fraction of the dataset via label flipping
    poisoned_dataset = inject_poisoned_examples(
        clean_dataset,
        poisoning_ratio=poisoning_ratio,
        poison_type='label_flip'
    )

    # Test if data validation catches poisoned examples
    validation_results = data_validation_pipeline(poisoned_dataset)

    if validation_results.suspicious_count > 0:
        print(f"✅ Detected {validation_results.suspicious_count} suspicious examples")
    else:
        print("❌ Data poisoning not detected!")


def main():
    """Command-line interface."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    args = parser.parse_args()

    # TODO: Add main execution logic


if __name__ == "__main__":
    main()