Files
ai-llm-red-team-handbook/scripts/automation/attack.py
T
shiva108 b3d3bac51f Add practical scripts directory with 400+ tools
- Extracted all code examples from handbook chapters
- Organized into 15 attack categories
- Created shared utilities (api_client, validators, logging, constants)
- Added workflow orchestration scripts
- Implemented install.sh for easy setup
- Renamed all scripts to descriptive functional names
- Added comprehensive README and documentation
- Included pytest test suite and configuration
2026-01-07 11:39:46 +01:00

52 lines
1.3 KiB
Python

#!/usr/bin/env python3
"""
Practical Detection Example
Source: Chapter_32_Automated_Attack_Frameworks
Category: automation

Detection Logic for Automated Fuzzing
"""
import time
from collections import deque
# NOTE(review): argparse and sys are imported but not used anywhere in this
# file — presumably left over from a CLI variant; confirm before removing.
import argparse
import sys
class RateLimitDetector:
    """Flags request bursts typical of automated fuzzers.

    Keeps a sliding window of recent request arrival times and reports a
    request as blocked once more than ``max_requests`` have been admitted
    within the last ``window_seconds`` seconds.
    """

    def __init__(self, max_requests: int = 10, window_seconds: int = 60):
        self.max_requests = max_requests
        self.window = window_seconds
        # Arrival times (epoch seconds) of requests admitted in the window;
        # deque gives O(1) expiry from the left end.
        self.timestamps = deque()

    def check_request(self) -> bool:
        """
        Record one incoming request and test it against the rate limit.

        Returns:
            True if the request is blocked (limit exceeded), False otherwise.
        """
        current = time.time()
        cutoff = current - self.window
        # Expire entries that have aged out of the sliding window.
        while self.timestamps:
            if self.timestamps[0] >= cutoff:
                break
            self.timestamps.popleft()
        if len(self.timestamps) >= self.max_requests:
            # Over the limit: block, and do NOT record the rejected attempt
            # (matches the original behavior — only admitted requests count).
            return True
        self.timestamps.append(current)
        return False
if __name__ == "__main__":
    # Demo: fire 7 back-to-back requests at a 5-per-10-seconds limit; the
    # final two should be reported as blocked.
    detector = RateLimitDetector(max_requests=5, window_seconds=10)
    for attempt in range(1, 8):
        was_blocked = detector.check_request()
        print(f"Req {attempt}: Blocked? {was_blocked}")