mirror of
https://github.com/Shiva108/ai-llm-red-team-handbook.git
synced 2026-05-15 05:00:57 +02:00
b3d3bac51f
- Extracted all code examples from handbook chapters - Organized into 15 attack categories - Created shared utilities (api_client, validators, logging, constants) - Added workflow orchestration scripts - Implemented install.sh for easy setup - Renamed all scripts to descriptive functional names - Added comprehensive README and documentation - Included pytest test suite and configuration
45 lines
1.0 KiB
Python
45 lines
1.0 KiB
Python
#!/usr/bin/env python3
"""
Token generation timing patterns.

Source: Chapter_15_Data_Leakage_and_Extraction
Category: data_extraction
"""
import argparse
import sys
import time
|
|
def analyze_token_timing(model_api, prompt):
    """Measure inter-token delays while streaming a model response.

    Args:
        model_api: Object exposing ``stream(prompt)`` that yields tokens
            incrementally (assumed interface — confirm against caller).
        prompt: Prompt string sent to the model.

    Returns:
        Tuple ``(tokens, delays)`` where ``tokens`` is the list of streamed
        tokens and ``delays[i]`` is the elapsed time in seconds before
        token ``i`` arrived.
    """
    delays = []
    tokens = []

    stream = model_api.stream(prompt)
    # perf_counter() is monotonic, so delays can never be negative;
    # time.time() can jump backwards on system clock adjustments.
    last_time = time.perf_counter()

    for token in stream:
        current_time = time.perf_counter()
        delays.append(current_time - last_time)
        tokens.append(token)
        last_time = current_time

    # Interpreting the delay profile:
    # - Longer delays may indicate database lookups
    # - Consistent delays suggest cached/memorized content
    # - Spikes may reveal plugin calls or filtering
    return tokens, delays
|
|
|
|
|
|
def main(argv=None):
    """Command-line interface.

    Args:
        argv: Optional argument list to parse. ``None`` (the default)
            falls back to ``sys.argv[1:]``, preserving the original CLI
            behavior while allowing tests to inject arguments.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    args = parser.parse_args(argv)

    # TODO: Add main execution logic using ``args``.
|
|
|
|
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status
    # (sys.exit(None) exits 0, so current behavior is unchanged).
    sys.exit(main())