refactor: Reorganize scripts into dedicated subdirectories for examples, tools, config, and docs.

This commit is contained in:
shiva108
2026-01-07 12:14:03 +01:00
parent 74b1ad5bbd
commit 92f16a543e
31 changed files with 1856 additions and 6998 deletions

View File

@@ -1,115 +0,0 @@
#!/usr/bin/env python3
"""
Fix MD040 linting errors by adding language identifiers to bare code blocks.
Specifically designed for Chapter 14: Prompt Injection
"""
import re
import sys
def detect_code_block_language(content):
    """
    Heuristically determine the language of a code block based on its content.

    Indicators are checked in priority order (Python, bash, JSON, YAML,
    markdown, conversation transcripts, bullet lists, HTTP) and the first
    match wins; anything unrecognized falls back to 'text'.

    Args:
        content: Raw text found between the code fences.

    Returns:
        A language identifier string suitable for a markdown fence
        (e.g. 'python', 'bash', 'json', 'text').
    """
    # Strip once instead of repeatedly; the original also built an unused
    # per-line split here, which has been removed.
    stripped = content.strip()
    # Check for Python indicators
    if any(keyword in content for keyword in ['def ', 'import ', 'class ', 'print(', 'return ', ' #', 'if __name__']):
        return 'python'
    # Check for bash/shell indicators
    if any(keyword in content for keyword in ['#!/bin/', 'echo ', 'curl ', 'grep ', 'sed ', 'awk ', '$ ', 'export ']):
        return 'bash'
    # Check for JSON indicators: braces wrapping quoted keys/values
    if stripped.startswith('{') and stripped.endswith('}'):
        if '"' in content and ':' in content:
            return 'json'
    # Check for YAML indicators: bare "key:" lines with no code punctuation
    if re.search(r'^\w+:\s*$', content, re.MULTILINE) and not any(c in content for c in ['{', '(', 'def ']):
        return 'yaml'
    # Check for markdown content (headings or task-list checkboxes)
    if '## ' in content or '### ' in content or '- [ ]' in content:
        return 'markdown'
    # Check for prompts/examples (conversation format)
    if any(indicator in content.lower() for indicator in ['user:', 'assistant:', 'system:', 'prompt:', 'response:']):
        return 'text'
    # Check for bullet points or list-like content
    if content.count('\n- ') > 2 or content.count('\n* ') > 2:
        return 'text'
    # Check for HTTP/API content
    if any(keyword in content for keyword in ['HTTP/', 'GET ', 'POST ', 'Content-Type:', 'Authorization:']):
        return 'http'
    # Default to text for plain examples
    return 'text'
def fix_code_blocks(filepath):
    """
    Add language identifiers to bare code blocks in a markdown file.

    Scans the file for fenced code blocks. Fences that already carry a
    language identifier are tracked (so their closing fence is not mistaken
    for the opening of a new bare block — the original implementation had
    exactly that bug) but are otherwise left untouched. Bare opening fences
    get a heuristically detected language appended, preserving the original
    fence length. The file is rewritten in place.

    Args:
        filepath: Path to the markdown file to fix.

    Returns:
        The number of bare code blocks that were fixed.
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        content = f.read()

    lines = content.split('\n')
    in_code_block = False      # inside any fenced block, tagged or bare
    block_is_bare = False      # current block's opening fence lacked a language
    open_fence = '```'         # backtick run of the current bare opening fence
    code_block_start = -1      # line index of the current opening fence
    code_block_lines = []      # collected body of the current bare block
    fixed_count = 0

    for i, line in enumerate(lines):
        if re.match(r'^```+', line):
            bare_match = re.match(r'^(```+)(\s*)$', line)
            if not in_code_block:
                # Opening fence: remember whether it needs a language tag.
                in_code_block = True
                block_is_bare = bare_match is not None
                if bare_match:
                    open_fence = bare_match.group(1)
                code_block_start = i
                code_block_lines = []
            else:
                # Closing fence: retroactively tag the opening if it was bare.
                if block_is_bare:
                    language = detect_code_block_language('\n'.join(code_block_lines))
                    lines[code_block_start] = f'{open_fence}{language}'
                    fixed_count += 1
                in_code_block = False
                block_is_bare = False
                code_block_start = -1
                code_block_lines = []
        elif in_code_block and block_is_bare:
            code_block_lines.append(line)

    # Write back
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write('\n'.join(lines))
    return fixed_count
if __name__ == '__main__':
    # CLI entry point: expects exactly one argument, the markdown file.
    if len(sys.argv) < 2:
        print("Usage: python fix_md040.py <markdown_file>")
        sys.exit(1)
    target = sys.argv[1]
    fixed = fix_code_blocks(target)
    print(f"Fixed {fixed} bare code blocks in {target}")

View File

@@ -1,309 +1,108 @@
# AI LLM Red Team Handbook - Practical Scripts
# AI LLM Red Team Handbook - Scripts
This directory contains 386 practical, production-ready scripts extracted from the AI LLM Red Team Handbook chapters. All scripts are organized by attack category and technique, providing immediately usable tools for LLM security assessments.
Production-ready security testing scripts for LLM applications, extracted from the AI LLM Red Team Handbook.
## 📚 Quick Links
- **[Full Documentation](docs/README.md)** - Complete guide to all scripts
- **[Quick Start Guide](docs/QUICKSTART.md)** - Get started in minutes
- **[Testing Guide](docs/TESTING_GUIDE.md)** - Comprehensive testing framework
## 🚀 Quick Start
```bash
# Install dependencies
./tools/install.sh
# Activate environment
source venv/bin/activate
# Run a test
python3 prompt_injection/chapter_14_prompt_injection_01_prompt_injection.py --help
```
## 📁 Directory Structure
```
scripts/
├── automation/ 4 scripts - Attack orchestration and fuzzing
├── compliance/ 16 scripts - Standards, regulations, and best practices
├── data_extraction/ 53 scripts - Data leakage and extraction techniques
├── evasion/ 14 scripts - Filter bypass and obfuscation methods
├── jailbreak/ 21 scripts - Guardrail bypass and jailbreak techniques
├── model_attacks/ 23 scripts - Model theft, DoS, and adversarial ML
├── multimodal/ 15 scripts - Cross-modal and multimodal attacks
├── plugin_exploitation/ 128 scripts - Plugin, API, and function calling exploits
├── post_exploitation/ 6 scripts - Persistence and advanced chaining
├── prompt_injection/ 41 scripts - Prompt injection attacks and templates
├── rag_attacks/ 13 scripts - RAG poisoning and retrieval manipulation
├── reconnaissance/ 2 scripts - LLM fingerprinting and discovery
├── social_engineering/ 8 scripts - Social engineering techniques for LLMs
├── supply_chain/ 29 scripts - Supply chain and provenance attacks
├── utils/ 13 scripts - Common utilities and helpers
└── workflows/ - End-to-end attack workflows
├── docs/ # Documentation
├── config/ # Configuration files
├── tools/ # Development & validation tools
├── examples/ # Example scripts
├── tests/ # Testing framework
├── logs/ # Runtime logs
├── automation/ # Attack orchestration (4 scripts)
├── compliance/ # Security standards (16 scripts)
├── data_extraction/ # Data leakage techniques (53 scripts)
├── evasion/ # Filter bypass methods (14 scripts)
├── jailbreak/ # Guardrail bypasses (21 scripts)
├── model_attacks/ # Model theft & DoS (23 scripts)
├── multimodal/ # Cross-modal attacks (15 scripts)
├── plugin_exploitation/ # Plugin/API exploits (128 scripts)
├── post_exploitation/ # Persistence techniques (6 scripts)
├── prompt_injection/ # Prompt injection (41 scripts)
├── rag_attacks/ # RAG poisoning (13 scripts)
├── reconnaissance/ # LLM fingerprinting (2 scripts)
├── social_engineering/ # Manipulation techniques (8 scripts)
├── supply_chain/ # Supply chain attacks (29 scripts)
└── utils/ # Common utilities (13 scripts)
```
## 🚀 Quick Start
**Total:** 386+ production-ready scripts across 15 attack categories
### Automated Installation
## 🔧 Configuration
The easiest way to get started:
- **Python:** 3.8+
- **Dependencies:** See [config/requirements.txt](config/requirements.txt)
- **Testing:** See [config/pytest.ini](config/pytest.ini)
## 🧪 Testing
Run comprehensive tests:
```bash
# Run the installation script
cd /home/e/Desktop/ai-llm-red-team-handbook/scripts
./install.sh
# This will:
# - Check Python 3.8+ installation
# - Create a virtual environment (venv/)
# - Install all dependencies from requirements.txt
# - Make all scripts executable
# - Run verification tests
cd tests
./run_comprehensive_tests.sh
```
### Manual Installation
See [docs/TESTING_GUIDE.md](docs/TESTING_GUIDE.md) for detailed testing options.
```bash
# Install Python dependencies
pip install -r requirements.txt
## 🛠️ Development Tools
# Make scripts executable
chmod +x automation/*.py
chmod +x reconnaissance/*.py
# ... etc for other categories
```
Located in `tools/`:
### Basic Usage
- **`install.sh`** - Automated installation script
- **`validation/`** - Health checks, link validation, linting
- **`build/`** - Script generation and build tools (archived)
```bash
# Example: Run a prompt injection script
python3 prompt_injection/chapter_14_prompt_injection_01_prompt_injection.py --help
## 📖 Examples
# Example: Tokenization analysis
python3 utils/chapter_09_llm_architectures_and_system_components_01_utils.py
Example implementations in `examples/`:
# Example: RAG poisoning attack
python3 rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_01_rag_attacks.py
```
- **`c2_server_elite.py`** - Advanced C2 server demonstration
- **`runner.py`** - Test runner framework
- **`models.py`** - Data models for testing
## 📚 Category Overview
## ⚠️ Security Warning
### Reconnaissance (`reconnaissance/`)
- LLM system fingerprinting
- API discovery and enumeration
- Architecture detection
**Source Chapters:** 31
### Prompt Injection (`prompt_injection/`)
- Basic injection techniques
- Context overflow attacks
- System prompt leakage
- Multi-turn injection chains
**Source Chapters:** 14
### Data Extraction (`data_extraction/`)
- PII extraction techniques
- Memory dumping
- Training data extraction
- API key leakage
**Source Chapters:** 15
### Jailbreaks (`jailbreak/`)
- Character roleplay bypasses
- DAN (Do Anything Now) techniques
- Encoding-based bypasses
- Multi-language jailbreaks
**Source Chapters:** 16
### Plugin Exploitation (`plugin_exploitation/`)
- Command injection via plugins
- Function calling hijacks
- API authentication bypass
- Third-party integration exploits
**Source Chapters:** 11, 17 (01-06)
### RAG Attacks (`rag_attacks/`)
- Vector database poisoning
- Retrieval manipulation
- Indirect injection via RAG
- Embedding space attacks
**Source Chapters:** 12
### Evasion (`evasion/`)
- Token smuggling
- Filter bypass techniques
- Obfuscation methods
- Adversarial input crafting
**Source Chapters:** 18, 34
### Model Attacks (`model_attacks/`)
- Model extraction/theft
- Membership inference
- DoS and resource exhaustion
- Adversarial examples
- Model inversion
- Backdoor attacks
**Source Chapters:** 19, 20, 21, 25, 29, 30
### Multimodal Attacks (`multimodal/`)
- Cross-modal injection
- Image-based exploits
- Audio/video manipulation
- Multi-modal evasion
**Source Chapters:** 22
### Post-Exploitation (`post_exploitation/`)
- Persistence mechanisms
- Advanced attack chaining
- Privilege escalation
- Lateral movement
**Source Chapters:** 23, 30, 35
### Social Engineering (`social_engineering/`)
- LLM manipulation techniques
- Persuasion and influence
- Trust exploitation
- Phishing via AI
**Source Chapters:** 24
### Automation (`automation/`)
- Attack fuzzing frameworks
- Orchestration tools
- Batch testing utilities
- Report generation
**Source Chapters:** 32, 33
### Supply Chain (`supply_chain/`)
- Dependency attacks
- Model provenance verification
- Package poisoning
- Supply chain reconnaissance
**Source Chapters:** 13, 26
### Compliance (`compliance/`)
- Bug bounty automation
- Standard compliance checking
- Audit utilities
- Best practice verification
**Source Chapters:** 39, 40, 41
### Utilities (`utils/`)
- Tokenization analysis
- Model loading helpers
- API utilities
- Evidence logging
- Generic helpers
**Source Chapters:** 9, 10, 27, 28, 42, 44
## 🔗 Workflow Scripts
End-to-end attack scenarios combining multiple techniques:
- `workflows/full_assessment.py` - Complete LLM security assessment
- `workflows/plugin_pentest.py` - Plugin-focused penetration test
- `workflows/data_leak_test.py` - Data leakage assessment
- `workflows/rag_exploitation.py` - RAG-specific attack chain
## 🛠️ Common Patterns
### Pattern 1: Reconnaissance → Injection → Extraction
```bash
# 1. Fingerprint the target
python3 reconnaissance/chapter_31_ai_system_reconnaissance_01_reconnaissance.py --target https://api.example.com
# 2. Test prompt injection
python3 prompt_injection/chapter_14_prompt_injection_01_prompt_injection.py --payload "Ignore previous..."
# 3. Extract data
python3 data_extraction/chapter_15_data_leakage_and_extraction_01_data_extraction.py
```
### Pattern 2: Plugin Discovery → Exploitation
```bash
# 1. Enumerate plugins
python3 plugin_exploitation/chapter_17_01_fundamentals_and_architecture_01_plugin_exploitation.py
# 2. Test authentication
python3 plugin_exploitation/chapter_17_02_api_authentication_and_authorization_01_plugin_exploitation.py
# 3. Exploit vulnerabilities
python3 plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_01_plugin_exploitation.py
```
## 📝 Notes
### Script Naming Convention
Scripts follow the naming pattern: `{chapter_name}_{index}_{category}.py`
Example: `chapter_14_prompt_injection_01_prompt_injection.py`
- **Chapter:** 14 (Prompt Injection)
- **Index:** 01 (first code block from that chapter)
- **Category:** prompt_injection
### Customization
Most scripts include:
- Command-line interfaces via `argparse`
- Docstrings explaining purpose and source
- Error handling
- Verbose output options
### Development
To add new scripts:
1. Follow the existing structure in each category
2. Include proper docstrings and source attribution
3. Add CLI interface with argparse
4. Update this README if adding new categories
## 🔐 Security Warning
⚠️ **These scripts are for authorized security testing only.**
**These scripts are for authorized security testing only.**
- Only use against systems you own or have explicit permission to test
- Follow all applicable laws and regulations
- Respect rules of engagement and scope boundaries
- Document all activities for evidence and audit trails
## 📖 Additional Resources
- **Main Handbook:** `/docs/` directory
- **Field Manuals:** `/docs/field_manuals/`
- **Original Chapters:** Reference source chapters listed in each script's docstring
## 🤝 Contributing
When adding new scripts:
1. Extract from handbook chapters
2. Classify into appropriate category
3. Ensure CLI interface exists
4. Test before committing
5. Update this README
## 📄 License
Refer to the main repository license.
## 🤝 Contributing
See the [full documentation](docs/README.md) for contribution guidelines.
---
**Generated from:** AI LLM Red Team Handbook (53 chapters, 386 code blocks)
**Source:** AI LLM Red Team Handbook
**Scripts:** 386+ from 53 handbook chapters
**Last Updated:** 2026-01-07

File diff suppressed because one or more lines are too long

103
scripts/config/README.md Normal file
View File

@@ -0,0 +1,103 @@
# Configuration Files
This directory contains configuration files for the AI LLM Red Team scripts.
## 📄 Files
### `requirements.txt`
Python package dependencies for all scripts.
**Install dependencies:**
```bash
pip install -r config/requirements.txt
```
**Key packages:**
- `requests` - HTTP client library
- `transformers` - HuggingFace transformers (optional)
- `tiktoken` - OpenAI tokenization (optional)
### `pytest.ini`
pytest configuration for running tests.
**Run tests:**
```bash
pytest tests/ -v
```
**Configuration includes:**
- Test discovery patterns
- Output formatting
- Coverage settings
- Timeout configurations
## 🔧 Usage
### Installation
```bash
# Automated (recommended)
./tools/install.sh
# Manual
pip install -r config/requirements.txt
```
### Testing
```bash
# Run all tests
pytest tests/ -v
# Run with coverage
pytest tests/ --cov=. --cov-report=html
# Run specific tests
pytest tests/test_prompt_injection.py -v
```
## 📝 Customization
### Adding Dependencies
Edit `requirements.txt` and add new packages:
```text
# Example: adding new packages
numpy>=1.21.0
pandas>=1.3.0
```
Then reinstall:
```bash
pip install -r config/requirements.txt
```
### Test Configuration
Edit `pytest.ini` to customize test behavior:
```ini
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
```
## 🔗 Related
- [Installation Guide](../tools/README.md)
- [Testing Guide](../docs/TESTING_GUIDE.md)
- [Quick Start](../docs/QUICKSTART.md)
---
**Last Updated:** 2026-01-07

View File

@@ -6,7 +6,7 @@
```bash
cd /home/e/Desktop/ai-llm-red-team-handbook/scripts
./install.sh
./tools/install.sh
```
The installation script will:
@@ -28,7 +28,7 @@ python3 -m venv venv
source venv/bin/activate
# Install dependencies
pip install -r requirements.txt
pip install -r config/requirements.txt
# Make scripts executable
chmod +x workflows/*.py
@@ -110,7 +110,7 @@ If the virtual environment fails to activate:
rm -rf venv
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
pip install -r config/requirements.txt
```
### Missing Dependencies

309
scripts/docs/README.md Normal file
View File

@@ -0,0 +1,309 @@
# AI LLM Red Team Handbook - Practical Scripts
This directory contains 386 practical, production-ready scripts extracted from the AI LLM Red Team Handbook chapters. All scripts are organized by attack category and technique, providing immediately usable tools for LLM security assessments.
## 📁 Directory Structure
```
scripts/
├── automation/ 4 scripts - Attack orchestration and fuzzing
├── compliance/ 16 scripts - Standards, regulations, and best practices
├── data_extraction/ 53 scripts - Data leakage and extraction techniques
├── evasion/ 14 scripts - Filter bypass and obfuscation methods
├── jailbreak/ 21 scripts - Guardrail bypass and jailbreak techniques
├── model_attacks/ 23 scripts - Model theft, DoS, and adversarial ML
├── multimodal/ 15 scripts - Cross-modal and multimodal attacks
├── plugin_exploitation/ 128 scripts - Plugin, API, and function calling exploits
├── post_exploitation/ 6 scripts - Persistence and advanced chaining
├── prompt_injection/ 41 scripts - Prompt injection attacks and templates
├── rag_attacks/ 13 scripts - RAG poisoning and retrieval manipulation
├── reconnaissance/ 2 scripts - LLM fingerprinting and discovery
├── social_engineering/ 8 scripts - Social engineering techniques for LLMs
├── supply_chain/ 29 scripts - Supply chain and provenance attacks
├── utils/ 13 scripts - Common utilities and helpers
└── workflows/ - End-to-end attack workflows
```
## 🚀 Quick Start
### Automated Installation
The easiest way to get started:
```bash
# Run the installation script
cd /home/e/Desktop/ai-llm-red-team-handbook/scripts
./install.sh
# This will:
# - Check Python 3.8+ installation
# - Create a virtual environment (venv/)
# - Install all dependencies from requirements.txt
# - Make all scripts executable
# - Run verification tests
```
### Manual Installation
```bash
# Install Python dependencies
pip install -r config/requirements.txt
# Make scripts executable
chmod +x automation/*.py
chmod +x reconnaissance/*.py
# ... etc for other categories
```
### Basic Usage
```bash
# Example: Run a prompt injection script
python3 prompt_injection/chapter_14_prompt_injection_01_prompt_injection.py --help
# Example: Tokenization analysis
python3 utils/chapter_09_llm_architectures_and_system_components_01_utils.py
# Example: RAG poisoning attack
python3 rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_01_rag_attacks.py
```
## 📚 Category Overview
### Reconnaissance (`reconnaissance/`)
- LLM system fingerprinting
- API discovery and enumeration
- Architecture detection
**Source Chapters:** 31
### Prompt Injection (`prompt_injection/`)
- Basic injection techniques
- Context overflow attacks
- System prompt leakage
- Multi-turn injection chains
**Source Chapters:** 14
### Data Extraction (`data_extraction/`)
- PII extraction techniques
- Memory dumping
- Training data extraction
- API key leakage
**Source Chapters:** 15
### Jailbreaks (`jailbreak/`)
- Character roleplay bypasses
- DAN (Do Anything Now) techniques
- Encoding-based bypasses
- Multi-language jailbreaks
**Source Chapters:** 16
### Plugin Exploitation (`plugin_exploitation/`)
- Command injection via plugins
- Function calling hijacks
- API authentication bypass
- Third-party integration exploits
**Source Chapters:** 11, 17 (01-06)
### RAG Attacks (`rag_attacks/`)
- Vector database poisoning
- Retrieval manipulation
- Indirect injection via RAG
- Embedding space attacks
**Source Chapters:** 12
### Evasion (`evasion/`)
- Token smuggling
- Filter bypass techniques
- Obfuscation methods
- Adversarial input crafting
**Source Chapters:** 18, 34
### Model Attacks (`model_attacks/`)
- Model extraction/theft
- Membership inference
- DoS and resource exhaustion
- Adversarial examples
- Model inversion
- Backdoor attacks
**Source Chapters:** 19, 20, 21, 25, 29, 30
### Multimodal Attacks (`multimodal/`)
- Cross-modal injection
- Image-based exploits
- Audio/video manipulation
- Multi-modal evasion
**Source Chapters:** 22
### Post-Exploitation (`post_exploitation/`)
- Persistence mechanisms
- Advanced attack chaining
- Privilege escalation
- Lateral movement
**Source Chapters:** 23, 30, 35
### Social Engineering (`social_engineering/`)
- LLM manipulation techniques
- Persuasion and influence
- Trust exploitation
- Phishing via AI
**Source Chapters:** 24
### Automation (`automation/`)
- Attack fuzzing frameworks
- Orchestration tools
- Batch testing utilities
- Report generation
**Source Chapters:** 32, 33
### Supply Chain (`supply_chain/`)
- Dependency attacks
- Model provenance verification
- Package poisoning
- Supply chain reconnaissance
**Source Chapters:** 13, 26
### Compliance (`compliance/`)
- Bug bounty automation
- Standard compliance checking
- Audit utilities
- Best practice verification
**Source Chapters:** 39, 40, 41
### Utilities (`utils/`)
- Tokenization analysis
- Model loading helpers
- API utilities
- Evidence logging
- Generic helpers
**Source Chapters:** 9, 10, 27, 28, 42, 44
## 🔗 Workflow Scripts
End-to-end attack scenarios combining multiple techniques:
- `workflows/full_assessment.py` - Complete LLM security assessment
- `workflows/plugin_pentest.py` - Plugin-focused penetration test
- `workflows/data_leak_test.py` - Data leakage assessment
- `workflows/rag_exploitation.py` - RAG-specific attack chain
## 🛠️ Common Patterns
### Pattern 1: Reconnaissance → Injection → Extraction
```bash
# 1. Fingerprint the target
python3 reconnaissance/chapter_31_ai_system_reconnaissance_01_reconnaissance.py --target https://api.example.com
# 2. Test prompt injection
python3 prompt_injection/chapter_14_prompt_injection_01_prompt_injection.py --payload "Ignore previous..."
# 3. Extract data
python3 data_extraction/chapter_15_data_leakage_and_extraction_01_data_extraction.py
```
### Pattern 2: Plugin Discovery → Exploitation
```bash
# 1. Enumerate plugins
python3 plugin_exploitation/chapter_17_01_fundamentals_and_architecture_01_plugin_exploitation.py
# 2. Test authentication
python3 plugin_exploitation/chapter_17_02_api_authentication_and_authorization_01_plugin_exploitation.py
# 3. Exploit vulnerabilities
python3 plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_01_plugin_exploitation.py
```
## 📝 Notes
### Script Naming Convention
Scripts follow the naming pattern: `{chapter_name}_{index}_{category}.py`
Example: `chapter_14_prompt_injection_01_prompt_injection.py`
- **Chapter:** 14 (Prompt Injection)
- **Index:** 01 (first code block from that chapter)
- **Category:** prompt_injection
### Customization
Most scripts include:
- Command-line interfaces via `argparse`
- Docstrings explaining purpose and source
- Error handling
- Verbose output options
### Development
To add new scripts:
1. Follow the existing structure in each category
2. Include proper docstrings and source attribution
3. Add CLI interface with argparse
4. Update this README if adding new categories
## 🔐 Security Warning
⚠️ **These scripts are for authorized security testing only.**
- Only use against systems you own or have explicit permission to test
- Follow all applicable laws and regulations
- Respect rules of engagement and scope boundaries
- Document all activities for evidence and audit trails
## 📖 Additional Resources
- **Main Handbook:** `/docs/` directory
- **Field Manuals:** `/docs/field_manuals/`
- **Original Chapters:** Reference source chapters listed in each script's docstring
## 🤝 Contributing
When adding new scripts:
1. Extract from handbook chapters
2. Classify into appropriate category
3. Ensure CLI interface exists
4. Test before committing
5. Update this README
## 📄 License
Refer to the main repository license.
---
**Generated from:** AI LLM Red Team Handbook (53 chapters, 386 code blocks)
**Last Updated:** 2026-01-07

View File

@@ -0,0 +1,365 @@
# Comprehensive Testing Guide
## Quick Start
### Option 1: Run Full Comprehensive Test Suite (Recommended)
```bash
cd /home/e/Desktop/ai-llm-red-team-handbook/scripts
./tests/run_comprehensive_tests.sh
```
This will run ALL test phases (2-4 hours):
- ✅ Environment validation
- ✅ Static analysis & syntax checking
- ✅ Functional testing (all 386+ scripts)
- ✅ Tool integration verification
- ✅ Existing pytest suite
- ✅ Performance benchmarks
- ✅ Compliance checks (OWASP, MITRE, NIST, Ethical)
- ✅ LLM-powered validation
- ✅ Comprehensive report generation
### Option 2: Quick Test (5-10 minutes)
```bash
cd /home/e/Desktop/ai-llm-red-team-handbook/scripts
# Test specific category
python3 tests/test_orchestrator.py \
--category prompt_injection \
--test-type functional \
--verbose
# Test with LLM validation
python3 tests/test_orchestrator.py \
--category reconnaissance \
--llm-endpoint http://localhost:11434 \
--llm-validate \
--verbose
```
### Option 3: Specific Test Types
#### Functional Testing Only
```bash
python3 tests/test_orchestrator.py \
--all \
--test-type functional \
--generate-report \
--output functional_report.json
```
#### Integration Testing Only
```bash
python3 tests/test_orchestrator.py \
--all \
--test-type integration \
--verbose
```
#### Performance Testing Only
```bash
python3 tests/test_orchestrator.py \
--all \
--test-type performance \
--generate-report \
--format html \
--output performance_report.html
```
#### Compliance Testing
```bash
# Test OWASP LLM Top 10 compliance
python3 tests/test_orchestrator.py \
--all \
--test-type compliance \
--standard OWASP-LLM-TOP-10 \
--verbose
# Test MITRE ATLAS compliance
python3 tests/test_orchestrator.py \
--all \
--test-type compliance \
--standard MITRE-ATLAS \
--verbose
# Test NIST AI RMF compliance
python3 tests/test_orchestrator.py \
--all \
--test-type compliance \
--standard NIST-AI-RMF \
--verbose
```
## Available Test Types
| Test Type | Description | Duration |
| --------------- | -------------------------------------- | --------- |
| **functional** | Syntax, imports, help text validation | 10-15 min |
| **integration** | External tool and dependency checks | 2-3 min |
| **performance** | Response time and resource usage | 5-10 min |
| **compliance** | Standards adherence (OWASP/MITRE/NIST) | 5-10 min |
| **all** | Runs all test types | 20-30 min |
## Report Formats
### JSON Report
```bash
python3 tests/test_orchestrator.py \
--all \
--test-type all \
--generate-report \
--format json \
--output test_report.json
```
### HTML Report (Visual Dashboard)
```bash
python3 tests/test_orchestrator.py \
--all \
--test-type all \
--generate-report \
--format html \
--output test_report.html
# View in browser
xdg-open test_report.html
```
### Executive Summary (Text)
```bash
python3 tests/test_orchestrator.py \
--all \
--test-type all \
--generate-report \
--format summary \
--output executive_summary.txt
# View in terminal
cat executive_summary.txt
```
## Testing Individual Categories
Test scripts from specific attack categories:
```bash
# Reconnaissance scripts
python3 tests/test_orchestrator.py --category reconnaissance --verbose
# Prompt injection scripts
python3 tests/test_orchestrator.py --category prompt_injection --verbose
# Data extraction scripts
python3 tests/test_orchestrator.py --category data_extraction --verbose
# Jailbreak scripts
python3 tests/test_orchestrator.py --category jailbreak --verbose
# Plugin exploitation scripts
python3 tests/test_orchestrator.py --category plugin_exploitation --verbose
# RAG attacks scripts
python3 tests/test_orchestrator.py --category rag_attacks --verbose
# Evasion scripts
python3 tests/test_orchestrator.py --category evasion --verbose
# Model attacks scripts
python3 tests/test_orchestrator.py --category model_attacks --verbose
# Multimodal scripts
python3 tests/test_orchestrator.py --category multimodal --verbose
# Post-exploitation scripts
python3 tests/test_orchestrator.py --category post_exploitation --verbose
# Social engineering scripts
python3 tests/test_orchestrator.py --category social_engineering --verbose
# Automation scripts
python3 tests/test_orchestrator.py --category automation --verbose
# Supply chain scripts
python3 tests/test_orchestrator.py --category supply_chain --verbose
# Compliance scripts
python3 tests/test_orchestrator.py --category compliance --verbose
# Utility scripts
python3 tests/test_orchestrator.py --category utils --verbose
```
## LLM-Powered Testing
Leverage the local LLM for intelligent validation:
```bash
# Test LLM connection first
curl http://localhost:11434/api/tags
# Run with LLM validation
python3 tests/test_orchestrator.py \
--all \
--llm-endpoint http://localhost:11434 \
--llm-validate \
--verbose \
--generate-report \
--format html \
--output llm_validated_report.html
```
The LLM will:
- ✅ Analyze script purpose and implementation
- ✅ Identify potential security concerns
- ✅ Provide code quality ratings
- ✅ Suggest improvements
## Running Existing Pytest Suite
```bash
# Run all tests
pytest tests/ -v
# Run with coverage
pytest tests/ --cov=. --cov-report=html --cov-report=term
# Run specific test file
pytest tests/test_prompt_injection.py -v
# Run in parallel
pytest tests/ -n auto -v
# Run with timeout
pytest tests/ --timeout=60
```
## Troubleshooting
### LLM Endpoint Not Accessible
If you see warnings about LLM endpoint:
```bash
# Check if Ollama/LMStudio is running
curl http://localhost:11434/api/tags
# Start Ollama if needed
ollama serve
# Or use different endpoint
python3 tests/test_orchestrator.py \
--llm-endpoint http://localhost:8080 \
--all
```
### Import Errors
```bash
# Ensure virtual environment is activated
source venv/bin/activate
# Install dependencies
pip install -r config/requirements.txt
# Install test dependencies
pip install pytest pytest-cov pytest-timeout pytest-xdist
```
### Permission Denied
```bash
# Make scripts executable
chmod +x tests/test_orchestrator.py
chmod +x tests/run_comprehensive_tests.sh
```
## Performance Optimization
For faster testing:
```bash
# Test fewer scripts per category (sampling)
python3 tests/test_orchestrator.py --all --test-type functional
# Skip LLM validation for speed
python3 tests/test_orchestrator.py --all --test-type all
# Run pytest in parallel
pytest tests/ -n auto
```
## Continuous Integration
Add to cron for weekly testing:
```bash
# Edit crontab
crontab -e
# Add line for Sunday 2 AM testing
0 2 * * 0 cd /home/e/Desktop/ai-llm-red-team-handbook/scripts && ./tests/run_comprehensive_tests.sh > /tmp/test_results.log 2>&1
```
## Expected Outputs
After running tests, you'll find in `test_reports/`:
```text
test_reports/
├── functional_tests_20260107_120000.json
├── integration_tests_20260107_120500.json
├── performance_20260107_121000.json
├── compliance_OWASP-LLM-TOP-10_20260107_121500.json
├── compliance_MITRE-ATLAS_20260107_122000.json
├── compliance_NIST-AI-RMF_20260107_122500.json
├── compliance_ETHICAL_20260107_123000.json
├── llm_validation_20260107_123500.json
├── comprehensive_report_20260107_124000.html
├── executive_summary_20260107_124000.txt
├── coverage_20260107_124000/
│ └── index.html
├── pytest_20260107_120000.log
└── syntax_check_20260107_120000.log
```
## Success Criteria
**All tests should meet these criteria:**
- 100% syntax validation (all scripts compile)
- >90% functional test pass rate
- All critical dependencies available
- Performance within acceptable ranges
- Compliance coverage >80% for each standard
- Zero critical security vulnerabilities
## Next Steps
After testing:
1. Review `executive_summary_*.txt` for high-level results
2. Open `comprehensive_report_*.html` in browser for detailed analysis
3. Check `coverage_*/index.html` for code coverage metrics
4. Address any failures found in reports
5. Re-run tests to verify fixes
---
**Total Testing Time:**
- Quick test (single category): 2-5 minutes
- Moderate test (all functional): 15-20 minutes
- Full comprehensive suite: 2-4 hours
**Recommended frequency:** Weekly or before major releases

View File

@@ -0,0 +1,97 @@
# Example Scripts
This directory contains example implementations and demonstration scripts.
## 📜 Scripts
### `c2_server_elite.py` (56 KB)
Advanced Command & Control (C2) server demonstration showing sophisticated attack infrastructure capabilities.
**Features:**
- Multi-protocol support
- Encryption and obfuscation
- Command execution framework
- Advanced payload delivery
**Usage:**
```bash
python3 examples/c2_server_elite.py --help
```
⚠️ **For educational and authorized testing only!**
### `runner.py`
Test runner framework for executing LLM red team test suites.
**Purpose:**
- Orchestrates multiple test executions
- Collects and aggregates results
- Generates JSON reports
**Usage:**
```bash
python3 examples/runner.py
```
Requires:
- `client.py` - LLM client implementation
- `config.py` - Configuration settings
- `models.py` - Data models
### `models.py`
Data models and base classes for LLM security testing.
**Components:**
- `TestResult` - Dataclass for test results
- `BaseTest` - Base class for test implementations
**Usage:**
```python
from examples.models import TestResult, BaseTest
class MyTest(BaseTest):
id_prefix = "TEST"
category = "custom"
def run(self, client):
# Test implementation
return [TestResult(...)]
```
## 🎯 Purpose
These examples demonstrate:
- Advanced attack techniques
- Testing framework patterns
- Data model structures
- Integration approaches
## ⚠️ Security Notice
**These are demonstration scripts for authorized security testing only.**
- Review code before execution
- Only use in controlled environments
- Follow responsible disclosure practices
- Respect all applicable laws and regulations
## 🔗 Related Documentation
- [Main Scripts](../docs/README.md)
- [Testing Guide](../docs/TESTING_GUIDE.md)
- [Quick Start](../docs/QUICKSTART.md)
---
**Note:** These examples are provided for educational purposes and to demonstrate implementation patterns. Adapt them to your specific testing requirements.

View File

@@ -1,98 +0,0 @@
#!/usr/bin/env python3
"""
Extract all code blocks from handbook chapters.
Creates a catalog JSON file mapping chapters to code blocks.
"""
import re
import json
from pathlib import Path
from typing import List, Dict
def extract_code_blocks(markdown_file: Path) -> List[Dict]:
    """Return metadata records for every fenced code block in *markdown_file*.

    Only blocks tagged ``python``, ``bash`` or ``sh`` are kept. Each record
    carries the code plus locating context: ~200 characters of surrounding
    text, the nearest preceding ``##`` heading, the 1-based line number of
    the opening fence, and the block length in lines. Unreadable files are
    reported and yield an empty list (best-effort scan).
    """
    try:
        text = markdown_file.read_text(encoding='utf-8')
    except Exception as exc:  # best-effort: report and skip unreadable files
        print(f"Error reading {markdown_file}: {exc}")
        return []

    fence_re = re.compile(r'```(\w+)\n(.*?)```', re.DOTALL)
    records: List[Dict] = []
    for hit in fence_re.finditer(text):
        lang = hit.group(1)
        body = hit.group(2)
        if lang not in ('python', 'bash', 'sh'):
            continue
        # Up to 200 characters either side serves as human-readable context.
        ctx_lo = max(0, hit.start() - 200)
        ctx_hi = min(len(text), hit.end() + 200)
        # The most recent '## ' heading before the fence names its section.
        headings = re.findall(r'##\s+([^\n]+)', text[:hit.start()])
        records.append({
            'language': lang,
            'code': body.strip(),
            'context': text[ctx_lo:ctx_hi],
            'section': headings[-1] if headings else "Unknown",
            'line_number': text[:hit.start()].count('\n') + 1,
            'length': len(body.strip().split('\n')),
        })
    return records
def catalog_all_code(base_dir: Path = Path('/home/e/Desktop/ai-llm-red-team-handbook')) -> Dict:
    """Scan every ``docs/Chapter_*.md`` under *base_dir* and build a catalog.

    For each chapter containing Python/bash code blocks, record per-language
    counts plus the full block metadata, write the catalog to
    ``scripts/code_catalog.json`` under *base_dir*, print a summary, and
    return the catalog.

    Args:
        base_dir: Handbook root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        Mapping of chapter stem -> {'file', 'python_blocks', 'bash_blocks',
        'total_blocks', 'blocks'}.
    """
    docs_dir = base_dir / 'docs'
    catalog: Dict[str, Dict] = {}
    chapter_files = sorted(docs_dir.glob('Chapter_*.md'))
    print(f"Found {len(chapter_files)} chapter files")
    for chapter_file in chapter_files:
        chapter_name = chapter_file.stem
        print(f"Processing {chapter_name}...")
        blocks = extract_code_blocks(chapter_file)
        if not blocks:
            continue  # chapters without code are omitted from the catalog
        catalog[chapter_name] = {
            'file': str(chapter_file),
            'python_blocks': len([b for b in blocks if b['language'] == 'python']),
            'bash_blocks': len([b for b in blocks if b['language'] in ['bash', 'sh']]),
            'total_blocks': len(blocks),
            'blocks': blocks
        }
        print(f"  Found {len(blocks)} code blocks ({catalog[chapter_name]['python_blocks']} Python, {catalog[chapter_name]['bash_blocks']} Bash)")
    # Persist the catalog next to the generated scripts.
    catalog_file = base_dir / 'scripts' / 'code_catalog.json'
    catalog_file.parent.mkdir(exist_ok=True)
    catalog_file.write_text(json.dumps(catalog, indent=2))
    # Summary statistics across all cataloged chapters.
    total_python = sum(d['python_blocks'] for d in catalog.values())
    total_bash = sum(d['bash_blocks'] for d in catalog.values())
    total_blocks = sum(d['total_blocks'] for d in catalog.values())
    print(f"\n{'='*60}")
    print(f"Catalog saved to {catalog_file}")
    print(f"Total chapters with code: {len(catalog)}")
    print(f"Total code blocks: {total_blocks}")
    print(f"  - Python: {total_python}")
    print(f"  - Bash/sh: {total_bash}")
    print(f"{'='*60}")
    return catalog
# Script entry point: build and save the code catalog when run directly.
if __name__ == '__main__':
    catalog_all_code()

View File

@@ -1,292 +0,0 @@
#!/usr/bin/env python3
"""
Generate organized, practical scripts from cataloged code blocks.
Creates proper directory structure and Python modules with CLI interfaces.
"""
import json
import re
from pathlib import Path
from typing import Dict, List, Set
from collections import defaultdict
# Maps each output script category to the two classification signals used by
# classify_code_block():
#   'keywords' — substrings searched for in chapter name + code + section text
#   'chapters' — chapter-stem prefixes that force the category (checked before
#                keywords, so chapter assignment always wins)
# NOTE: dict insertion order matters — the classifier returns the FIRST match.
CATEGORY_MAPPINGS = {
    'utils': {
        'keywords': ['tokenizer', 'tokenization', 'encode', 'decode', 'tiktoken', 'import', 'helper', 'utility'],
        'chapters': ['Chapter_09', 'Chapter_10']
    },
    'reconnaissance': {
        'keywords': ['fingerprint', 'discover', 'enumerate', 'scan', 'reconnaissance', 'probe'],
        'chapters': ['Chapter_31']
    },
    'prompt_injection': {
        'keywords': ['injection', 'inject', 'ignore previous', 'system prompt leak', 'jailbreak injection'],
        'chapters': ['Chapter_14']
    },
    'data_extraction': {
        'keywords': ['extract', 'leak', 'exfiltration', 'pii', 'data leakage', 'memory dump'],
        'chapters': ['Chapter_15']
    },
    'jailbreak': {
        'keywords': ['jailbreak', 'bypass', 'character', 'roleplay', 'dan', 'bypass guardrail'],
        'chapters': ['Chapter_16']
    },
    'plugin_exploitation': {
        'keywords': ['plugin', 'function_call', 'tool use', 'api call', 'command injection'],
        'chapters': ['Chapter_11', 'Chapter_17']
    },
    'rag_attacks': {
        'keywords': ['rag', 'retrieval', 'vector', 'embedding', 'chromadb', 'faiss', 'poisoning'],
        'chapters': ['Chapter_12']
    },
    'evasion': {
        'keywords': ['obfuscate', 'evasion', 'encode', 'filter bypass', 'adversarial'],
        'chapters': ['Chapter_18', 'Chapter_34']
    },
    'model_attacks': {
        'keywords': ['model theft', 'extraction', 'membership inference', 'dos', 'resource exhaustion', 'adversarial example'],
        'chapters': ['Chapter_19', 'Chapter_20', 'Chapter_21', 'Chapter_25', 'Chapter_29', 'Chapter_30']
    },
    'automation': {
        'keywords': ['fuzzer', 'automation', 'orchestrat', 'framework', 'batch'],
        'chapters': ['Chapter_32', 'Chapter_33']
    },
    'labs': {
        'keywords': ['setup', 'install', 'configure', 'docker', 'environment'],
        'chapters': ['Chapter_07']
    },
    'supply_chain': {
        'keywords': ['supply chain', 'dependency', 'package', 'provenance'],
        'chapters': ['Chapter_13', 'Chapter_26']
    },
    'multimodal': {
        'keywords': ['image', 'audio', 'video', 'multimodal', 'cross-modal'],
        'chapters': ['Chapter_22']
    },
    'social_engineering': {
        'keywords': ['social engineering', 'phishing', 'manipulation', 'persuasion'],
        'chapters': ['Chapter_24']
    },
    'post_exploitation': {
        'keywords': ['persistence', 'post-exploitation', 'backdoor', 'maintain access'],
        'chapters': ['Chapter_23', 'Chapter_30', 'Chapter_35']
    },
    'compliance': {
        'keywords': ['compliance', 'standard', 'regulation', 'audit'],
        'chapters': ['Chapter_39', 'Chapter_40', 'Chapter_41']
    }
}
def classify_code_block(chapter_name: str, code: str, section: str) -> str:
    """Assign a code block to a category from ``CATEGORY_MAPPINGS``.

    Resolution order: chapter-prefix rules win, keyword spotting in the
    combined chapter/code/section text is the fallback, and anything left
    unmatched lands in ``utils``. The first matching category (in mapping
    order) is returned.
    """
    haystack = f"{chapter_name} {code} {section}".lower()
    # Pass 1: chapter-number rules are the most reliable signal.
    for name, rules in CATEGORY_MAPPINGS.items():
        if any(chapter_name.startswith(prefix) for prefix in rules['chapters']):
            return name
    # Pass 2: fall back to keyword spotting in the combined text.
    for name, rules in CATEGORY_MAPPINGS.items():
        if any(keyword.lower() in haystack for keyword in rules['keywords']):
            return name
    # Pass 3: generic code defaults to the utils bucket.
    return 'utils'
def extract_imports(code: str) -> List[str]:
    """Return the stripped ``import ...``/``from ...`` lines found in *code*."""
    return [
        line.strip()
        for line in code.split('\n')
        if line.strip().startswith(('import ', 'from '))
    ]
def extract_functions(code: str) -> List[str]:
    """Return the stripped ``def ...`` signature lines found in *code*."""
    return [
        line.strip()
        for line in code.split('\n')
        if line.strip().startswith('def ')
    ]
def clean_code(code: str) -> str:
    """Collapse runs of consecutive blank lines in *code* to a single one.

    The first blank line of each run is kept verbatim (including any
    whitespace it contains); the rest are dropped. Non-blank lines are
    untouched.
    """
    kept = []
    last_was_blank = False
    for line in code.split('\n'):
        blank = not line.strip()
        # Drop a blank line only when the previously seen line was blank too.
        if not (blank and last_was_blank):
            kept.append(line)
        last_was_blank = blank
    return '\n'.join(kept)
def create_directory_structure(base_path: Path):
    """Create one package directory per category (plus ``workflows``).

    Each directory is created under *base_path* if missing and seeded with
    an ``__init__.py`` so it imports as a Python package. Existing
    ``__init__.py`` files are left untouched.
    """
    categories = list(CATEGORY_MAPPINGS.keys())
    categories.append('workflows')  # workflows has no mapping entry but still gets a package
    for name in categories:
        package_dir = base_path / name
        package_dir.mkdir(exist_ok=True, parents=True)
        init_file = package_dir / '__init__.py'
        if not init_file.exists():
            init_file.write_text(f'"""{name.replace("_", " ").title()} module for AI LLM Red Teaming."""\n')
    print(f"Created directory structure with {len(categories)} categories")
def generate_script(block_data: Dict, category: str, chapter: str, index: int) -> tuple:
    """Generate a complete, runnable script from a cataloged code block.

    Bash/sh blocks become standalone ``.sh`` scripts with a provenance
    header. Python blocks get a header docstring, their import lines hoisted
    to the top, and a stub ``main()`` CLI appended when the block has no
    ``if __name__`` entry point of its own.

    Args:
        block_data: Catalog record with at least 'code', 'language' and
            (optionally) 'section' keys.
        category: Target category directory name (used in header/filename).
        chapter: Source chapter stem (used in header/filename).
        index: 1-based block index within the chapter.

    Returns:
        ``(filename, script_content)`` ready to be written to disk.
    """
    code = block_data['code']
    section = block_data.get('section', 'Unknown')
    language = block_data['language']

    if language == 'bash' or language == 'sh':
        # Bash scripts need only a shebang plus a provenance header.
        script_content = f'''#!/bin/bash
# AI LLM Red Team - {section}
# Source: {chapter}
# Category: {category}
{code}
'''
        filename = f"{chapter.lower()}_{index:02d}.sh"
        return filename, script_content

    # --- Python script path ---
    imports = extract_imports(code)
    # Strip the original import lines; they are re-emitted at the top.
    # (Note: the original also called extract_functions() here, but the
    # result was never used, so that dead call has been removed.)
    code_lines = [
        line for line in code.split('\n')
        if not line.strip().startswith(('import ', 'from '))
    ]
    main_code = '\n'.join(code_lines).strip()

    # Header: shebang + module docstring recording provenance.
    script_parts = [
        '#!/usr/bin/env python3',
        '"""',
        f'{section}',
        '',
        f'Source: {chapter}',
        f'Category: {category}',
        '"""',
        '',
    ]
    # Hoisted imports from the original block.
    if imports:
        script_parts.extend(imports)
        script_parts.append('')
    # Guarantee argparse/sys are available for the CLI stub below.
    if 'argparse' not in ' '.join(imports):
        script_parts.extend([
            'import argparse',
            'import sys',
            ''
        ])
    script_parts.append(clean_code(main_code))
    script_parts.append('')
    # Append a stub CLI entry point when the block has none of its own.
    if 'if __name__' not in main_code:
        script_parts.extend([
            '',
            'def main():',
            '    """Command-line interface."""',
            '    parser = argparse.ArgumentParser(description=__doc__)',
            '    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")',
            '    args = parser.parse_args()',
            '    ',
            '    # TODO: Add main execution logic',
            '    pass',
            '',
            'if __name__ == "__main__":',
            '    main()',
        ])
    script_content = '\n'.join(script_parts)
    filename = f"{chapter.lower()}_{index:02d}_{category}.py"
    return filename, script_content
def main():
    """Generate all scripts from the catalog and report statistics.

    Loads ``code_catalog.json``, creates the category directory layout,
    classifies and writes one script per cataloged block (never overwriting
    existing files, so manual edits survive re-runs), then prints a summary
    and saves the generated-file list to ``generated_scripts.txt``.

    Returns:
        defaultdict mapping category name -> number of scripts created.
    """
    base_path = Path('/home/e/Desktop/ai-llm-red-team-handbook/scripts')
    catalog_file = base_path / 'code_catalog.json'

    # Load catalog produced by the extraction step.
    catalog = json.loads(catalog_file.read_text())
    print(f"Loaded catalog with {len(catalog)} chapters")

    # Create directory structure.
    create_directory_structure(base_path)

    # Track statistics.
    stats = defaultdict(int)
    generated_files = []

    # Process each chapter in deterministic order.
    for chapter_name, chapter_data in sorted(catalog.items()):
        print(f"\nProcessing {chapter_name}...")
        blocks = chapter_data['blocks']
        for idx, block in enumerate(blocks, 1):
            # Classify the block.
            category = classify_code_block(chapter_name, block['code'], block.get('section', ''))
            # Generate script.
            filename, script_content = generate_script(block, category, chapter_name, idx)
            # Write to the appropriate category folder.
            category_dir = base_path / category
            output_file = category_dir / filename
            # Only write if the file doesn't exist (avoid overwriting custom edits).
            if not output_file.exists():
                output_file.write_text(script_content)
                # Bug fix: previously printed the literal text "(unknown)"
                # instead of the generated filename.
                print(f"  Created: {category}/{filename}")
                stats[category] += 1
                generated_files.append(str(output_file.relative_to(base_path)))
            else:
                print(f"  Skipped (exists): {category}/{filename}")

    # Print the run summary.
    print(f"\n{'='*60}")
    print("Script Generation Complete")
    print("=" * 60)
    print(f"Total scripts generated: {sum(stats.values())}")
    print("\nScripts by category:")
    for category in sorted(stats.keys()):
        print(f"  {category:25s}: {stats[category]:3d} scripts")

    # Save the file list for later auditing.
    filelist = base_path / 'generated_scripts.txt'
    filelist.write_text('\n'.join(sorted(generated_files)))
    print("\nFile list saved to: generated_scripts.txt")
    return stats
# Script entry point: generate all scripts from the catalog when run directly.
if __name__ == '__main__':
    main()

View File

@@ -1,386 +0,0 @@
automation/chapter_32_automated_attack_frameworks_01_automation.py
automation/chapter_32_automated_attack_frameworks_02_automation.py
automation/chapter_33_red_team_automation_01_automation.py
automation/chapter_33_red_team_automation_02_automation.py
compliance/chapter_39_ai_bug_bounty_programs_01_compliance.py
compliance/chapter_39_ai_bug_bounty_programs_02_compliance.py
compliance/chapter_39_ai_bug_bounty_programs_03_compliance.py
compliance/chapter_40_compliance_and_standards_01_compliance.py
compliance/chapter_40_compliance_and_standards_02_compliance.py
compliance/chapter_40_compliance_and_standards_03_compliance.py
compliance/chapter_40_compliance_and_standards_04_compliance.py
compliance/chapter_40_compliance_and_standards_05_compliance.py
compliance/chapter_40_compliance_and_standards_06_compliance.py
compliance/chapter_40_compliance_and_standards_07_compliance.py
compliance/chapter_40_compliance_and_standards_08.sh
compliance/chapter_40_compliance_and_standards_09_compliance.py
compliance/chapter_41_industry_best_practices_01_compliance.py
compliance/chapter_41_industry_best_practices_02_compliance.py
compliance/chapter_41_industry_best_practices_03_compliance.py
compliance/chapter_41_industry_best_practices_04_compliance.py
data_extraction/chapter_15_data_leakage_and_extraction_01_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_02_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_03_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_04_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_05_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_06_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_07_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_08_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_09_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_10_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_11_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_12_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_13_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_14_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_15_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_16_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_17_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_18_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_19_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_20_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_21_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_22_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_23_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_24_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_25_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_26_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_27_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_28_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_29_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_30_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_31_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_32_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_33_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_34_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_35_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_36_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_37_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_38_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_39_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_40_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_41_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_42_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_43_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_44_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_45_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_46_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_47_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_48_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_49_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_50_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_51_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_52_data_extraction.py
data_extraction/chapter_15_data_leakage_and_extraction_53_data_extraction.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_01_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_02_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_03_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_04_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_05_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_06_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_07_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_08_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_09_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_10_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_11_evasion.py
evasion/chapter_18_evasion_obfuscation_and_adversarial_inputs_12_evasion.py
evasion/chapter_34_defense_evasion_techniques_01_evasion.py
evasion/chapter_34_defense_evasion_techniques_02_evasion.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_01_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_02_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_03_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_04_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_05_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_06_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_07_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_08_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_09_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_10_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_11_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_12_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_13_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_14_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_15_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_16_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_17_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_18_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_19_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_20_jailbreak.py
jailbreak/chapter_16_jailbreaks_and_bypass_techniques_21_jailbreak.py
model_attacks/chapter_19_training_data_poisoning_01_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_02_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_03_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_04_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_05_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_06_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_07_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_08_model_attacks.py
model_attacks/chapter_19_training_data_poisoning_09_model_attacks.py
model_attacks/chapter_20_model_theft_and_membership_inference_01_model_attacks.py
model_attacks/chapter_20_model_theft_and_membership_inference_02_model_attacks.py
model_attacks/chapter_21_model_dos_resource_exhaustion_01_model_attacks.py
model_attacks/chapter_21_model_dos_resource_exhaustion_02_model_attacks.py
model_attacks/chapter_21_model_dos_resource_exhaustion_03_model_attacks.py
model_attacks/chapter_25_advanced_adversarial_ml_01_model_attacks.py
model_attacks/chapter_25_advanced_adversarial_ml_02_model_attacks.py
model_attacks/chapter_25_advanced_adversarial_ml_03_model_attacks.py
model_attacks/chapter_25_advanced_adversarial_ml_04_model_attacks.py
model_attacks/chapter_25_advanced_adversarial_ml_05_model_attacks.py
model_attacks/chapter_29_model_inversion_attacks_01_model_attacks.py
model_attacks/chapter_29_model_inversion_attacks_02_model_attacks.py
model_attacks/chapter_30_backdoor_attacks_01_model_attacks.py
model_attacks/chapter_30_backdoor_attacks_02_model_attacks.py
multimodal/chapter_22_cross_modal_multimodal_attacks_01_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_02_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_03_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_04_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_05_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_06_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_07_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_08_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_09.sh
multimodal/chapter_22_cross_modal_multimodal_attacks_10_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_11_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_12_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_13_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_14_multimodal.py
multimodal/chapter_22_cross_modal_multimodal_attacks_15_multimodal.py
plugin_exploitation/chapter_11_plugins_extensions_and_external_apis_01_plugin_exploitation.py
plugin_exploitation/chapter_17_01_fundamentals_and_architecture_01_plugin_exploitation.py
plugin_exploitation/chapter_17_01_fundamentals_and_architecture_02_plugin_exploitation.py
plugin_exploitation/chapter_17_01_fundamentals_and_architecture_03_plugin_exploitation.py
plugin_exploitation/chapter_17_01_fundamentals_and_architecture_04_plugin_exploitation.py
plugin_exploitation/chapter_17_01_fundamentals_and_architecture_05_plugin_exploitation.py
plugin_exploitation/chapter_17_01_fundamentals_and_architecture_06_plugin_exploitation.py
plugin_exploitation/chapter_17_01_fundamentals_and_architecture_07_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_01_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_02_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_03_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_04_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_05_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_06_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_07_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_08_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_09_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_10_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_11_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_12_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_13_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_14_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_15_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_16_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_17_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_18_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_19_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_20_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_21_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_22_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_23_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_24_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_25_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_26_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_27_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_28_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_29_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_30_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_31_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_32_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_33_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_34_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_35_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_36_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_37_plugin_exploitation.py
plugin_exploitation/chapter_17_02_api_authentication_and_authorization_38_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_01_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_02_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_03_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_04_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_05_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_06_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_07_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_08_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_09_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_10_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_11_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_12_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_13_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_14_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_15_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_16_plugin_exploitation.py
plugin_exploitation/chapter_17_03_plugin_vulnerabilities_17_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_01_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_02_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_03_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_04_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_05_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_06_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_07_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_08_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_09_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_10_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_11_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_12_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_13_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_14_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_15_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_16_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_17_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_18_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_19_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_20_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_21_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_22_plugin_exploitation.py
plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_23_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_01_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_02_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_03_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_04_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_05_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_06_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_07_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_08_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_09_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_10_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_11_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_12_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_13.sh
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_14_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_15_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_16_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_17_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_18_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_19_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_20_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_21_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_22_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_23_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_24_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_25_plugin_exploitation.py
plugin_exploitation/chapter_17_05_third_party_risks_and_testing_26_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_01_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_02_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_03_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_04_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_05_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_06_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_07_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_08_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_09_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_10_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_11_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_12_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_13.sh
plugin_exploitation/chapter_17_06_case_studies_and_defense_14_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_15_plugin_exploitation.py
plugin_exploitation/chapter_17_06_case_studies_and_defense_16_plugin_exploitation.py
post_exploitation/chapter_23_advanced_persistence_chaining_01_post_exploitation.py
post_exploitation/chapter_23_advanced_persistence_chaining_02_post_exploitation.py
post_exploitation/chapter_23_advanced_persistence_chaining_03_post_exploitation.py
post_exploitation/chapter_23_advanced_persistence_chaining_04_post_exploitation.py
post_exploitation/chapter_35_post-exploitation_in_ai_systems_01_post_exploitation.py
post_exploitation/chapter_35_post-exploitation_in_ai_systems_02_post_exploitation.py
prompt_injection/chapter_14_prompt_injection_01_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_02.sh
prompt_injection/chapter_14_prompt_injection_03_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_04_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_05_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_06_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_07_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_08_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_09_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_10_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_11_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_12_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_13_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_14_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_15_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_16_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_17_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_18_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_19_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_20_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_21.sh
prompt_injection/chapter_14_prompt_injection_22_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_23_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_24_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_25_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_26_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_27_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_28_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_29_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_30_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_31_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_32.sh
prompt_injection/chapter_14_prompt_injection_33_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_34_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_35_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_36_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_37_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_38_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_39_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_40_prompt_injection.py
prompt_injection/chapter_14_prompt_injection_41.sh
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_01_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_02_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_03_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_04_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_05_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_06_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_07_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_08_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_09_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_10_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_11_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_12_rag_attacks.py
rag_attacks/chapter_12_retrieval_augmented_generation_rag_pipelines_13_rag_attacks.py
reconnaissance/chapter_31_ai_system_reconnaissance_01_reconnaissance.py
reconnaissance/chapter_31_ai_system_reconnaissance_02_reconnaissance.py
social_engineering/chapter_24_social_engineering_llms_01_social_engineering.py
social_engineering/chapter_24_social_engineering_llms_02_social_engineering.py
social_engineering/chapter_24_social_engineering_llms_03_social_engineering.py
social_engineering/chapter_24_social_engineering_llms_04_social_engineering.py
social_engineering/chapter_24_social_engineering_llms_05_social_engineering.py
social_engineering/chapter_24_social_engineering_llms_06_social_engineering.py
social_engineering/chapter_24_social_engineering_llms_07_social_engineering.py
social_engineering/chapter_24_social_engineering_llms_08_social_engineering.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_01_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_02_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_03_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_04.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_05.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_06.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_07.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_08.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_09_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_10_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_11_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_12_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_13.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_14_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_15_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_16_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_17_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_18.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_19_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_20_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_21_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_22.sh
supply_chain/chapter_13_data_provenance_and_supply_chain_security_23_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_24_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_25_supply_chain.py
supply_chain/chapter_13_data_provenance_and_supply_chain_security_26_supply_chain.py
supply_chain/chapter_26_supply_chain_attacks_on_ai_01_supply_chain.py
supply_chain/chapter_26_supply_chain_attacks_on_ai_02_supply_chain.py
supply_chain/chapter_26_supply_chain_attacks_on_ai_03_supply_chain.py
utils/chapter_09_llm_architectures_and_system_components_01_utils.py
utils/chapter_09_llm_architectures_and_system_components_02_utils.py
utils/chapter_10_tokenization_context_and_generation_01_utils.py
utils/chapter_27_federated_learning_attacks_01_utils.py
utils/chapter_27_federated_learning_attacks_02_utils.py
utils/chapter_27_federated_learning_attacks_03_utils.py
utils/chapter_27_federated_learning_attacks_04_utils.py
utils/chapter_28_ai_privacy_attacks_01_utils.py
utils/chapter_28_ai_privacy_attacks_02_utils.py
utils/chapter_42_case_studies_and_war_stories_01_utils.py
utils/chapter_42_case_studies_and_war_stories_02_utils.py
utils/chapter_44_emerging_threats_01_utils.py
utils/chapter_44_emerging_threats_02_utils.py

View File

@@ -1 +0,0 @@
"""Labs module for AI LLM Red Teaming."""

2
scripts/logs/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.log
!.gitignore

File diff suppressed because it is too large Load Diff

View File

@@ -1,238 +0,0 @@
#!/usr/bin/env python3
"""
Rename script files from chapter-based names to descriptive functional names.
Analyzes each Python file's docstring and content to generate appropriate descriptive names.
"""
import re
from pathlib import Path
from typing import Dict, Tuple
import json
def extract_description(file_path: Path) -> str:
    """Extract a short description from a Python file's module docstring.

    Scans the first 50 lines for a triple-quoted docstring, joins its text
    into a single whitespace-normalized string, and truncates it to 200
    characters.

    Args:
        file_path: Path to the Python source file.

    Returns:
        The docstring text (possibly empty), or "" when the file cannot
        be read or contains no docstring in its first 50 lines.
    """
    try:
        content = file_path.read_text(encoding='utf-8')
    except Exception as e:
        print(f"Error reading {file_path}: {e}")
        return ""

    description_lines = []
    in_docstring = False
    for line in content.split('\n')[:50]:
        stripped = line.strip()
        if '"""' in stripped or "'''" in stripped:
            if in_docstring:
                # Closing delimiter of a multi-line docstring.
                break
            in_docstring = True
            text = stripped.replace('"""', '').replace("'''", '').strip()
            if text and text != '#':
                description_lines.append(text)
            # Fix: a one-line docstring ("""like this""") opens and closes
            # on the same line -- stop here instead of treating the code
            # that follows as docstring text.
            if stripped.count('"""') >= 2 or stripped.count("'''") >= 2:
                break
        elif in_docstring:
            if stripped and not stripped.startswith('#'):
                description_lines.append(stripped)

    description = ' '.join(description_lines)
    # Strip common "Source: ... Category: ..." boilerplate lines.
    description = re.sub(r'^Source:.*?Category:.*?$', '', description, flags=re.MULTILINE)
    # Collapse runs of whitespace into single spaces.
    description = re.sub(r'\s+', ' ', description).strip()
    return description[:200]  # Limit length
def generate_descriptive_name(description: str, category: str) -> str:
    """Build a short snake_case filename stem from a free-text description.

    Chooses an action keyword and a subject keyword from the description,
    falling back to the first "meaningful" words and finally to 'script'.
    The *category* argument is accepted for interface compatibility but
    does not influence the generated name.
    """
    text = description.lower()

    # Ordered action patterns: the first key with a matching keyword wins.
    action_patterns = (
        ('inspect', ('inspect', 'inspecting', 'view', 'check')),
        ('tokeniz', ('tokenizer', 'tokenization', 'token')),
        ('extract', ('extract', 'extraction', 'exfiltrat')),
        ('inject', ('inject', 'injection')),
        ('bypass', ('bypass', 'evade', 'circumvent')),
        ('jailbreak', ('jailbreak', 'dan', 'guardrail')),
        ('test', ('test', 'testing')),
        ('attack', ('attack', 'exploit')),
        ('poison', ('poison', 'poisoning')),
        ('steal', ('steal', 'theft')),
        ('leak', ('leak', 'leakage')),
        ('prompt', ('prompt',)),
        ('rag', ('rag', 'retrieval', 'vector')),
        ('plugin', ('plugin', 'api', 'function')),
        ('model', ('model loading', 'load model')),
        ('fuzzer', ('fuzz', 'fuzzing')),
    )
    action = next(
        (name for name, keywords in action_patterns
         if any(keyword in text for keyword in keywords)),
        None,
    )

    # Subject rules, checked in this fixed order; first match wins.
    subject_rules = (
        ('tokenizer', ('tokenizer', 'token')),
        ('rag', ('rag', 'vector')),
        ('plugin', ('plugin',)),
        ('prompt', ('prompt',)),
        ('model', ('model',)),
        ('api', ('api',)),
    )
    subject = next(
        (name for name, keywords in subject_rules
         if any(keyword in text for keyword in keywords)),
        None,
    )

    # Assemble the stem from action and (non-redundant) subject.
    parts = []
    if action:
        parts.append(action)
    if subject and subject not in (action or ''):
        parts.append(subject)
    if not parts:
        # Fallback: the first one or two words of four or more letters.
        words = re.findall(r'\b\w{4,}\b', text)
        parts = words[:2] if len(words) >= 2 else words[:1]
    if not parts:
        parts = ['script']

    stem = '_'.join(parts)
    # Sanitize: only [a-z0-9_], no repeated or dangling underscores.
    stem = re.sub(r'[^a-z0-9_]', '_', stem)
    return re.sub(r'_+', '_', stem).strip('_')
def rename_file(file_path: Path, dry_run: bool = True) -> Tuple[Path, Path]:
    """Compute (and optionally apply) a descriptive rename for one file.

    The target name is derived from the file's docstring via
    generate_descriptive_name(); uniqueness is probed against the
    filesystem at call time only.

    Returns:
        (old_path, new_path); new_path equals old_path when the file
        already carries its descriptive name.
    """
    # The parent directory name doubles as the category label.
    category = file_path.parent.name
    stem = generate_descriptive_name(extract_description(file_path), category)

    # Probe for a free name, keeping the file's own name if it already matches.
    candidate = file_path.parent / f"{stem}.py"
    suffix = 2
    while candidate.exists() and candidate != file_path:
        candidate = file_path.parent / f"{stem}_{suffix}.py"
        suffix += 1

    if not dry_run and candidate != file_path:
        file_path.rename(candidate)
    return (file_path, candidate)
def main():
    """CLI entry point: build (and optionally execute) the renaming plan.

    Scans --base-dir for chapter_*.py files, computes descriptive target
    names, prints and saves the plan to rename_plan.json, and performs the
    renames when --execute is given.
    """
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--dry-run', action='store_true', help='Show renaming plan without executing')
    parser.add_argument('--execute', action='store_true', help='Execute the renaming')
    parser.add_argument('--category', help='Only process files in this category')
    # Fix: the scripts directory was hard-coded to one developer's home
    # directory, breaking the tool everywhere else. Default to the
    # directory containing this script, overridable via --base-dir.
    parser.add_argument('--base-dir', type=Path,
                        default=Path(__file__).resolve().parent,
                        help='Scripts directory to scan (default: the directory containing this script)')
    args = parser.parse_args()
    base_dir = args.base_dir
    # Find all chapter-based files
    chapter_files = list(base_dir.glob('**/chapter_*.py'))
    print(f"Found {len(chapter_files)} files to rename")
    if args.category:
        chapter_files = [f for f in chapter_files if f.parent.name == args.category]
        print(f"Filtered to {len(chapter_files)} files in category: {args.category}")
    # Build renaming plan
    rename_plan = []
    planned_targets = set()  # targets already claimed by earlier plan entries
    for file_path in sorted(chapter_files):
        old, new = rename_file(file_path, dry_run=True)
        if old == new:
            continue
        # Fix: rename_file() only checks the filesystem, so two source
        # files could be planned onto the same target name; disambiguate
        # against names already claimed within this plan.
        if new in planned_targets:
            stem = new.stem
            counter = 2
            while new in planned_targets or (new.exists() and new != old):
                new = old.parent / f"{stem}_{counter}.py"
                counter += 1
        planned_targets.add(new)
        rename_plan.append((old, new))
    print(f"\nRenaming plan ({len(rename_plan)} files):")
    print("=" * 80)
    for old, new in rename_plan[:20]:  # Show first 20
        print(f"{old.parent.name}/{old.name:60s} -> {new.name}")
    if len(rename_plan) > 20:
        print(f"... and {len(rename_plan) - 20} more files")
    # Save plan to JSON so it can be reviewed or replayed later.
    plan_file = base_dir / 'rename_plan.json'
    plan_data = {
        'total_files': len(rename_plan),
        'renames': [
            {
                'old': str(old.relative_to(base_dir)),
                'new': str(new.relative_to(base_dir)),
                'category': old.parent.name
            }
            for old, new in rename_plan
        ]
    }
    plan_file.write_text(json.dumps(plan_data, indent=2))
    print(f"\nFull plan saved to: {plan_file}")
    # Execute if requested
    if args.execute and not args.dry_run:
        print("\n" + "=" * 80)
        print("EXECUTING RENAMES...")
        print("=" * 80 + "\n")
        success_count = 0
        error_count = 0
        for old_path, new_path in rename_plan:
            try:
                old_path.rename(new_path)
                success_count += 1
                print(f"{old_path.name} -> {new_path.name}")
            except Exception as e:
                error_count += 1
                print(f"{old_path.name}: {e}")
        print(f"\n{success_count} files renamed successfully")
        if error_count:
            print(f"{error_count} files failed")
    else:
        print("\nRun with --execute to perform the renaming")
        print("Example: python3 rename_scripts.py --execute")

View File

@@ -0,0 +1,224 @@
#!/bin/bash
#
# Comprehensive Test Runner for AI LLM Red Team Scripts
# Runs all test phases sequentially and generates comprehensive reports
#
# Usage:
#   ./run_all_tests.sh
#
# Environment:
#   LLM_ENDPOINT - base URL of the local LLM server (default: http://localhost:11434)
#
# Reports are written as timestamped files under test_reports/.
#
set -e
# This script is in the tests/ directory, parent is scripts/
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PARENT_DIR="$(dirname "$SCRIPT_DIR")"
cd "$PARENT_DIR"
echo "================================================================================================="
echo "AI LLM RED TEAM HANDBOOK - COMPREHENSIVE SCRIPT TESTING"
echo "================================================================================================="
echo ""
echo "Start Time: $(date)"
echo ""
# Configuration
# LLM_ENDPOINT may be overridden from the caller's environment.
LLM_ENDPOINT="${LLM_ENDPOINT:-http://localhost:11434}"
REPORT_DIR="test_reports"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
# Create report directory
mkdir -p "$REPORT_DIR"
echo "Test Configuration:"
echo " LLM Endpoint: $LLM_ENDPOINT"
echo " Report Directory: $REPORT_DIR"
echo " Timestamp: $TIMESTAMP"
echo ""
# Phase 1: Environment Setup
echo "=== Phase 1: Environment Setup & Validation ==="
echo ""
# Test LLM connection
# A missing endpoint is a warning, not a failure: LLM-based phases degrade gracefully.
echo "Testing LLM endpoint..."
if curl -s "${LLM_ENDPOINT}/api/tags" > /dev/null 2>&1; then
echo "✓ LLM endpoint is accessible"
else
echo "✗ WARNING: LLM endpoint is not accessible at $LLM_ENDPOINT"
echo " LLM-based validation tests will be skipped"
fi
echo ""
# Check Python version
echo "Checking Python version..."
python3 --version
echo ""
# Activate virtual environment if it exists
if [ -d "venv" ]; then
echo "Activating virtual environment..."
source venv/bin/activate
echo "✓ Virtual environment activated"
else
echo "⚠ No virtual environment found. Using system Python."
fi
echo ""
# Install test dependencies
# grep filters the "already satisfied" noise; "|| true" keeps set -e from
# aborting when grep finds no lines to pass through.
echo "Installing test dependencies..."
pip install -q pytest pytest-cov pytest-timeout pytest-xdist requests 2>&1 | grep -v "Requirement already satisfied" || true
echo "✓ Dependencies installed"
echo ""
# Phase 2: Static Analysis
echo "=== Phase 2: Static Analysis & Code Quality ==="
echo ""
# py_compile every .py file (outside venv/) and count SyntaxError lines in the log.
echo "Running syntax validation..."
find . -name "*.py" -type f ! -path "./venv/*" -exec python3 -m py_compile {} \; 2>&1 | tee "$REPORT_DIR/syntax_check_${TIMESTAMP}.log"
SYNTAX_ERRORS=$(grep -c "SyntaxError" "$REPORT_DIR/syntax_check_${TIMESTAMP}.log" || true)
if [ "$SYNTAX_ERRORS" -eq 0 ]; then
echo "✓ No syntax errors found"
else
echo "✗ Found $SYNTAX_ERRORS syntax errors"
fi
echo ""
# Phase 3: Functional Testing
echo "=== Phase 3: Functional Testing ==="
echo ""
echo "Running comprehensive functional tests..."
python3 tests/test_orchestrator.py \
--llm-endpoint "$LLM_ENDPOINT" \
--all \
--test-type functional \
--verbose \
--generate-report \
--format json \
--output "$REPORT_DIR/functional_tests_${TIMESTAMP}.json"
echo ""
# Phase 4: Tool Integration
echo "=== Phase 4: Tool Integration Verification ==="
echo ""
echo "Testing tool integrations..."
python3 tests/test_orchestrator.py \
--llm-endpoint "$LLM_ENDPOINT" \
--all \
--test-type integration \
--verbose \
--generate-report \
--format json \
--output "$REPORT_DIR/integration_tests_${TIMESTAMP}.json"
echo ""
# Phase 5: Run existing pytest suite
# "|| true" keeps the runner going even when individual tests fail.
echo "=== Phase 5: Running Existing Test Suite ==="
echo ""
if [ -d "tests" ]; then
echo "Running pytest tests..."
pytest tests/ -v --tb=short --timeout=60 2>&1 | tee "$REPORT_DIR/pytest_${TIMESTAMP}.log" || true
echo ""
echo "Running pytest with coverage..."
pytest tests/ --cov=. --cov-report=html:"$REPORT_DIR/coverage_${TIMESTAMP}" --cov-report=term 2>&1 | tee "$REPORT_DIR/pytest_coverage_${TIMESTAMP}.log" || true
else
echo "⚠ No tests directory found, skipping pytest"
fi
echo ""
# Phase 6: Performance Assessment
echo "=== Phase 6: Performance Assessment ==="
echo ""
echo "Running performance benchmarks..."
python3 tests/test_orchestrator.py \
--llm-endpoint "$LLM_ENDPOINT" \
--all \
--test-type performance \
--verbose \
--generate-report \
--format json \
--output "$REPORT_DIR/performance_${TIMESTAMP}.json"
echo ""
# Phase 7: Compliance Testing
# One orchestrator run (and one report) per supported standard.
echo "=== Phase 7: Compliance & Security Standards ==="
echo ""
for STANDARD in "OWASP-LLM-TOP-10" "MITRE-ATLAS" "NIST-AI-RMF" "ETHICAL"; do
echo "Testing compliance with $STANDARD..."
python3 tests/test_orchestrator.py \
--llm-endpoint "$LLM_ENDPOINT" \
--all \
--test-type compliance \
--standard "$STANDARD" \
--verbose \
--generate-report \
--format json \
--output "$REPORT_DIR/compliance_${STANDARD}_${TIMESTAMP}.json"
echo ""
done
# Phase 8: LLM-Powered Validation
# Skipped entirely when the endpoint probe fails (see Phase 1).
echo "=== Phase 8: LLM-Powered Validation ==="
echo ""
if curl -s "${LLM_ENDPOINT}/api/tags" > /dev/null 2>&1; then
echo "Running LLM validation..."
python3 tests/test_orchestrator.py \
--llm-endpoint "$LLM_ENDPOINT" \
--all \
--llm-validate \
--verbose \
--generate-report \
--format json \
--output "$REPORT_DIR/llm_validation_${TIMESTAMP}.json"
else
echo "⚠ Skipping LLM validation (endpoint not accessible)"
fi
echo ""
# Phase 9: Generate Comprehensive Reports
# NOTE: these invocations re-run the orchestrator to produce each format.
echo "=== Phase 9: Report Generation ==="
echo ""
echo "Generating HTML report..."
python3 tests/test_orchestrator.py \
--llm-endpoint "$LLM_ENDPOINT" \
--all \
--test-type all \
--generate-report \
--format html \
--output "$REPORT_DIR/comprehensive_report_${TIMESTAMP}.html"
echo ""
echo "Generating summary report..."
python3 tests/test_orchestrator.py \
--llm-endpoint "$LLM_ENDPOINT" \
--all \
--test-type all \
--generate-report \
--format summary \
--output "$REPORT_DIR/executive_summary_${TIMESTAMP}.txt"
echo ""
echo "================================================================================================="
echo "TESTING COMPLETE"
echo "================================================================================================="
echo ""
echo "End Time: $(date)"
echo ""
echo "Reports generated in: $REPORT_DIR/"
echo ""
echo "Key Reports:"
echo " - Functional Tests: $REPORT_DIR/functional_tests_${TIMESTAMP}.json"
echo " - Integration Tests: $REPORT_DIR/integration_tests_${TIMESTAMP}.json"
echo " - Performance Tests: $REPORT_DIR/performance_${TIMESTAMP}.json"
echo " - Comprehensive Report: $REPORT_DIR/comprehensive_report_${TIMESTAMP}.html"
echo " - Executive Summary: $REPORT_DIR/executive_summary_${TIMESTAMP}.txt"
echo ""
echo "View the HTML report in your browser:"
echo " xdg-open $REPORT_DIR/comprehensive_report_${TIMESTAMP}.html"
echo ""
echo "================================================================================================="

View File

@@ -0,0 +1,558 @@
#!/usr/bin/env python3
"""
Comprehensive Test Orchestrator for AI LLM Red Team Scripts
This script performs comprehensive testing of all scripts in the repository,
including functional testing, tool integration, performance assessment,
error handling, and compliance verification.
Uses local LLM (Ollama/LMStudio) for intelligent test generation and validation.
"""
import argparse
import json
import os
import sys
import time
import subprocess
import importlib.util
import traceback
from pathlib import Path
from typing import Dict, List, Tuple, Any
from datetime import datetime
import requests
# Configuration
SCRIPT_DIR = Path(__file__).parent.absolute()  # directory this orchestrator lives in
# Category subdirectories scanned for scripts; each name is a folder under
# SCRIPT_DIR (missing folders are silently skipped by discover_scripts()).
CATEGORIES = [
    "reconnaissance", "prompt_injection", "data_extraction", "jailbreak",
    "plugin_exploitation", "rag_attacks", "evasion", "model_attacks",
    "multimodal", "post_exploitation", "social_engineering", "automation",
    "supply_chain", "compliance", "utils"
]
# Compliance standards
# Checklist items per standard; test_compliance() marks an item "covered"
# when its identifier appears (case-insensitively) in any script's text.
COMPLIANCE_STANDARDS = {
    "OWASP-LLM-TOP-10": ["LLM01", "LLM02", "LLM03", "LLM04", "LLM05",
                         "LLM06", "LLM07", "LLM08", "LLM09", "LLM10"],
    "MITRE-ATLAS": ["AML.T0000", "AML.T0001", "AML.T0002"],
    "NIST-AI-RMF": ["GOVERN", "MAP", "MEASURE", "MANAGE"],
    "ETHICAL": ["authorization", "scope", "documentation", "disclosure"]
}
class TestOrchestrator:
"""Main test orchestration class"""
    def __init__(self, llm_endpoint: str = "http://localhost:11434", verbose: bool = True):
        """Initialize the orchestrator.

        Args:
            llm_endpoint: Base URL of the local LLM server (Ollama-style
                API; /api/tags and /api/generate are used).
            verbose: When False, only ERROR/WARNING messages are printed.
        """
        self.llm_endpoint = llm_endpoint
        self.verbose = verbose
        # Aggregate result state, shared and mutated by all test phases.
        self.results = {
            "timestamp": datetime.now().isoformat(),
            "tests_run": 0,
            "tests_passed": 0,
            "tests_failed": 0,
            "tests_skipped": 0,
            "categories": {},
            "errors": [],
            "performance": {},
            "compliance": {}
        }
def log(self, message: str, level: str = "INFO"):
"""Log messages with timestamp"""
if self.verbose or level in ["ERROR", "WARNING"]:
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{timestamp}] [{level}] {message}")
def test_llm_connection(self) -> bool:
"""Test connection to local LLM"""
self.log("Testing LLM endpoint connection...")
try:
response = requests.get(f"{self.llm_endpoint}/api/tags", timeout=5)
if response.status_code == 200:
models = response.json().get("models", [])
self.log(f"✓ LLM endpoint accessible. Available models: {len(models)}")
return True
else:
self.log(f"✗ LLM endpoint returned status {response.status_code}", "ERROR")
return False
except Exception as e:
self.log(f"✗ Failed to connect to LLM endpoint: {e}", "ERROR")
return False
def query_llm(self, prompt: str, model: str = "llama2") -> str:
"""Query the local LLM"""
try:
payload = {
"model": model,
"prompt": prompt,
"stream": False
}
response = requests.post(
f"{self.llm_endpoint}/api/generate",
json=payload,
timeout=30
)
if response.status_code == 200:
return response.json().get("response", "")
return ""
except Exception as e:
self.log(f"LLM query failed: {e}", "ERROR")
return ""
def discover_scripts(self, category: str = None) -> Dict[str, List[Path]]:
"""Discover all Python scripts in categories"""
self.log("Discovering scripts...")
discovered = {}
categories_to_scan = [category] if category else CATEGORIES
for cat in categories_to_scan:
cat_path = SCRIPT_DIR / cat
if not cat_path.exists():
continue
scripts = list(cat_path.glob("*.py"))
if scripts:
discovered[cat] = scripts
self.log(f" [{cat}] Found {len(scripts)} scripts")
total = sum(len(scripts) for scripts in discovered.values())
self.log(f"Total scripts discovered: {total}")
return discovered
def test_script_syntax(self, script_path: Path) -> Tuple[bool, str]:
"""Test if script has valid Python syntax"""
try:
with open(script_path, 'r') as f:
compile(f.read(), script_path, 'exec')
return True, "Syntax valid"
except SyntaxError as e:
return False, f"Syntax error: {e}"
except Exception as e:
return False, f"Error: {e}"
def test_script_imports(self, script_path: Path) -> Tuple[bool, str]:
"""Test if script imports can be resolved"""
try:
spec = importlib.util.spec_from_file_location("test_module", script_path)
if spec and spec.loader:
module = importlib.util.module_from_spec(spec)
# Don't execute, just verify imports
return True, "Imports valid"
return False, "Could not load module"
except ImportError as e:
return False, f"Import error: {e}"
except Exception as e:
return False, f"Error: {e}"
def test_script_help(self, script_path: Path) -> Tuple[bool, str]:
"""Test if script provides --help"""
try:
result = subprocess.run(
[sys.executable, str(script_path), "--help"],
capture_output=True,
text=True,
timeout=5
)
if result.returncode == 0 and ("usage:" in result.stdout.lower() or "help" in result.stdout.lower()):
return True, "Help available"
return False, "No help output"
except subprocess.TimeoutExpired:
return False, "Timeout"
except Exception as e:
return False, f"Error: {e}"
    def test_functional(self, scripts: Dict[str, List[Path]]) -> Dict:
        """Run syntax/import/help checks on every discovered script.

        Args:
            scripts: Mapping of category name -> list of script paths, as
                produced by discover_scripts().

        Returns:
            Mapping of category -> list of per-script result dicts; the
            same data is also stored in self.results["categories"].

        Side effects: increments the shared tests_run/tests_passed/
        tests_failed counters and appends failures to
        self.results["errors"]. A script passes overall when syntax and
        imports pass; the --help check is informational only.
        """
        self.log("\n=== Phase 3: Functional Testing ===")
        results = {}
        for category, script_list in scripts.items():
            self.log(f"\nTesting category: {category}")
            category_results = []
            for script in script_list:
                self.results["tests_run"] += 1
                test_result = {
                    "script": script.name,
                    "syntax": None,
                    "imports": None,
                    "help": None,
                    "status": "PASS"
                }
                # Syntax test
                syntax_pass, syntax_msg = self.test_script_syntax(script)
                test_result["syntax"] = {"pass": syntax_pass, "message": syntax_msg}
                # Import test
                import_pass, import_msg = self.test_script_imports(script)
                test_result["imports"] = {"pass": import_pass, "message": import_msg}
                # Help test (does not affect overall pass/fail)
                help_pass, help_msg = self.test_script_help(script)
                test_result["help"] = {"pass": help_pass, "message": help_msg}
                # Overall status
                if syntax_pass and import_pass:
                    self.results["tests_passed"] += 1
                    self.log(f"{script.name}")
                else:
                    self.results["tests_failed"] += 1
                    test_result["status"] = "FAIL"
                    self.log(f"{script.name}", "ERROR")
                    self.results["errors"].append({
                        "script": str(script),
                        "errors": [syntax_msg, import_msg]
                    })
                category_results.append(test_result)
            results[category] = category_results
        self.results["categories"] = results
        return results
def test_integration(self) -> Dict:
"""Tool integration testing"""
self.log("\n=== Phase 4: Tool Integration Testing ===")
# Test if common tools are available
tools = {
"requests": "pip show requests",
"transformers": "pip show transformers",
"tiktoken": "pip show tiktoken",
}
integration_results = {}
for tool, check_cmd in tools.items():
try:
result = subprocess.run(
check_cmd.split(),
capture_output=True,
text=True,
timeout=5
)
available = result.returncode == 0
integration_results[tool] = available
status = "" if available else ""
self.log(f" {status} {tool}")
except Exception as e:
integration_results[tool] = False
self.log(f"{tool}: {e}", "ERROR")
return integration_results
def test_performance(self, scripts: Dict[str, List[Path]], sample_size: int = 5) -> Dict:
"""Performance testing"""
self.log("\n=== Phase 6: Performance Assessment ===")
performance_results = {}
for category, script_list in scripts.items():
# Sample scripts from each category
sample_scripts = script_list[:sample_size]
for script in sample_scripts:
start_time = time.time()
try:
result = subprocess.run(
[sys.executable, str(script), "--help"],
capture_output=True,
timeout=5
)
elapsed = time.time() - start_time
performance_results[script.name] = {
"elapsed_seconds": elapsed,
"status": "success"
}
self.log(f" {script.name}: {elapsed:.2f}s")
except subprocess.TimeoutExpired:
performance_results[script.name] = {
"elapsed_seconds": 5.0,
"status": "timeout"
}
except Exception as e:
performance_results[script.name] = {
"elapsed_seconds": 0,
"status": f"error: {e}"
}
self.results["performance"] = performance_results
return performance_results
def test_compliance(self, scripts: Dict[str, List[Path]], standard: str = "OWASP-LLM-TOP-10") -> Dict:
    """Compliance testing.

    Checks how many items of the given compliance standard are mentioned
    anywhere in the discovered scripts (case-insensitive substring match)
    and reports the resulting coverage percentage.

    Args:
        scripts: Mapping of category name to list of script paths.
        standard: Key into the module-level COMPLIANCE_STANDARDS table.

    Returns:
        Dict with the standard name, per-item check results, and coverage %.
    """
    self.log(f"\n=== Phase 8: Compliance Testing ({standard}) ===")
    compliance_results = {
        "standard": standard,
        "checks": [],
        "coverage": 0.0
    }
    if standard not in COMPLIANCE_STANDARDS:
        self.log(f"Unknown standard: {standard}", "WARNING")
        return compliance_results
    # For each compliance item, check if it's covered
    items = COMPLIANCE_STANDARDS[standard]
    covered_count = 0
    for item in items:
        needle = item.lower()
        covered = False
        for script_list in scripts.values():
            for script in script_list:
                # Was a bare `except: pass`, which also swallowed
                # KeyboardInterrupt/SystemExit; now only unreadable files
                # are skipped, and decode errors no longer abort the scan.
                try:
                    with open(script, 'r', encoding='utf-8', errors='replace') as f:
                        content = f.read()
                except OSError:
                    continue
                if needle in content.lower():
                    covered = True
                    break
            if covered:
                break
        if covered:
            covered_count += 1
        compliance_results["checks"].append({
            "item": item,
            "covered": covered
        })
        status = "" if covered else ""
        self.log(f"  {status} {item}")
    compliance_results["coverage"] = (covered_count / len(items)) * 100 if items else 0
    self.log(f"\nCoverage: {compliance_results['coverage']:.1f}%")
    self.results["compliance"][standard] = compliance_results
    return compliance_results
def llm_validate_script(self, script_path: Path) -> Dict:
    """Use LLM to validate script purpose and implementation.

    Sends the first 2000 characters of the script to the configured LLM
    and returns a dict with a truncated analysis, or ``validated: False``
    when the read or the query fails.
    """
    self.log(f"LLM validating: {script_path.name}")
    try:
        snippet = script_path.read_text()[:2000]  # First 2000 chars
        prompt = f"""Analyze this Python security testing script and provide:
1. Primary purpose
2. Potential security concerns
3. Code quality rating (1-10)
Script: {script_path.name}
```python
{snippet}
```
Respond in JSON format."""
        analysis = self.query_llm(prompt)
        if analysis:
            return {
                "script": script_path.name,
                "llm_analysis": analysis[:500],  # Truncate
                "validated": True
            }
    except Exception as exc:
        self.log(f"LLM validation error: {exc}", "ERROR")
    # Read failure, query failure, or empty LLM response all land here.
    return {"script": script_path.name, "validated": False}
def generate_report(self, output_file: str, format: str = "json"):
    """Generate test report.

    Renders ``self.results`` in the requested format ("json", "html" or
    "summary") and writes it to ``output_file``. Unknown formats are
    silently ignored, matching the original behavior.
    """
    self.log(f"\n=== Generating Report ({format}) ===")
    if format == "json":
        payload = json.dumps(self.results, indent=2)
        label = "JSON"
    elif format == "html":
        payload = self._generate_html_report()
        label = "HTML"
    elif format == "summary":
        payload = self._generate_summary()
        label = "Summary"
    else:
        return
    with open(output_file, 'w') as f:
        f.write(payload)
    self.log(f"{label} report saved to: {output_file}")
def _generate_html_report(self) -> str:
    """Generate HTML report.

    Builds a self-contained HTML page from ``self.results``: a summary
    panel (counts and pass rate) plus one table row per category.
    """
    # Guard against division by zero when no tests were executed.
    pass_rate = (self.results["tests_passed"] / self.results["tests_run"] * 100) if self.results["tests_run"] > 0 else 0
    # NOTE: CSS braces are doubled ({{ }}) because the template is an f-string.
    html = f"""<!DOCTYPE html>
<html>
<head>
<title>AI LLM Red Team Scripts - Test Report</title>
<style>
body {{ font-family: Arial, sans-serif; margin: 20px; }}
h1 {{ color: #333; }}
.summary {{ background: #f0f0f0; padding: 15px; border-radius: 5px; }}
.pass {{ color: green; }}
.fail {{ color: red; }}
table {{ border-collapse: collapse; width: 100%; margin-top: 20px; }}
th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
th {{ background-color: #4CAF50; color: white; }}
</style>
</head>
<body>
<h1>AI LLM Red Team Scripts - Comprehensive Test Report</h1>
<div class="summary">
<h2>Summary</h2>
<p><strong>Timestamp:</strong> {self.results["timestamp"]}</p>
<p><strong>Tests Run:</strong> {self.results["tests_run"]}</p>
<p class="pass"><strong>Tests Passed:</strong> {self.results["tests_passed"]}</p>
<p class="fail"><strong>Tests Failed:</strong> {self.results["tests_failed"]}</p>
<p><strong>Pass Rate:</strong> {pass_rate:.1f}%</p>
</div>
<h2>Category Results</h2>
<table>
<tr>
<th>Category</th>
<th>Scripts Tested</th>
<th>Status</th>
</tr>"""
    # One table row per category; styled red unless every script passed.
    for category, results in self.results["categories"].items():
        passed = sum(1 for r in results if r["status"] == "PASS")
        total = len(results)
        status_class = "pass" if passed == total else "fail"
        html += f"""
<tr>
<td>{category}</td>
<td>{total}</td>
<td class="{status_class}">{passed}/{total}</td>
</tr>"""
    html += """
</table>
</body>
</html>"""
    return html
def _generate_summary(self) -> str:
    """Generate text summary.

    Returns a plain-text report: executive counts from ``self.results``
    followed by a per-category pass/total breakdown.
    """
    # Guard against division by zero when no tests were executed.
    pass_rate = (self.results["tests_passed"] / self.results["tests_run"] * 100) if self.results["tests_run"] > 0 else 0
    summary = f"""
================================================================================
AI LLM RED TEAM HANDBOOK - COMPREHENSIVE TEST REPORT
================================================================================
Timestamp: {self.results["timestamp"]}
EXECUTIVE SUMMARY
--------------------------------------------------------------------------------
Total Tests Run: {self.results["tests_run"]}
Tests Passed: {self.results["tests_passed"]}
Tests Failed: {self.results["tests_failed"]}
Tests Skipped: {self.results["tests_skipped"]}
Pass Rate: {pass_rate:.1f}%
CATEGORY BREAKDOWN
--------------------------------------------------------------------------------
"""
    # One aligned "category  passed/total" line per category.
    for category, results in self.results["categories"].items():
        passed = sum(1 for r in results if r["status"] == "PASS")
        total = len(results)
        summary += f"{category.ljust(25)} {passed}/{total}\n"
    summary += """
================================================================================
END OF REPORT
================================================================================
"""
    return summary
def main():
    """CLI entry point: discover scripts, run the selected test phases,
    optionally write a report, and exit non-zero on any failure."""
    parser = argparse.ArgumentParser(
        description="Comprehensive Test Orchestrator for AI LLM Red Team Scripts"
    )
    parser.add_argument("--llm-endpoint", default="http://localhost:11434",
                        help="LLM endpoint URL")
    parser.add_argument("--category", choices=CATEGORIES,
                        help="Test specific category only")
    parser.add_argument("--all", action="store_true",
                        help="Test all categories")
    parser.add_argument("--test-type",
                        choices=["functional", "integration", "performance",
                                 "compliance", "error-handling", "all"],
                        default="all", help="Type of test to run")
    parser.add_argument("--standard", choices=list(COMPLIANCE_STANDARDS.keys()),
                        default="OWASP-LLM-TOP-10",
                        help="Compliance standard to test against")
    parser.add_argument("--generate-report", action="store_true",
                        help="Generate test report")
    parser.add_argument("--format", choices=["json", "html", "summary"],
                        default="json", help="Report format")
    parser.add_argument("--output", default="test_report.json",
                        help="Output file for report")
    parser.add_argument("--verbose", action="store_true",
                        help="Verbose output")
    parser.add_argument("--llm-validate", action="store_true",
                        help="Use LLM to validate scripts")
    args = parser.parse_args()

    runner = TestOrchestrator(args.llm_endpoint, args.verbose)

    # A missing LLM endpoint is not fatal; LLM-backed phases are skipped.
    if not runner.test_llm_connection():
        print("Warning: LLM endpoint not available. LLM-based tests will be skipped.")

    scripts = runner.discover_scripts(args.category)
    if not scripts:
        print("No scripts found to test!")
        return 1

    def wants(kind):
        # A phase runs when explicitly selected or when running everything.
        return args.test_type in (kind, "all")

    if wants("functional"):
        runner.test_functional(scripts)
    if wants("integration"):
        runner.test_integration()
    if wants("performance"):
        runner.test_performance(scripts)
    if wants("compliance"):
        runner.test_compliance(scripts, args.standard)

    if args.llm_validate:
        runner.log("\n=== Phase 9: LLM-Powered Validation ===")
        for script_list in scripts.values():
            for script in script_list[:3]:  # Sample 3 per category
                runner.llm_validate_script(script)

    if args.generate_report:
        runner.generate_report(args.output, args.format)

    print(runner._generate_summary())
    return 0 if runner.results["tests_failed"] == 0 else 1
if __name__ == "__main__":
    sys.exit(main())

114
scripts/tools/README.md Normal file
View File

@@ -0,0 +1,114 @@
# Development & Build Tools
This directory contains tools for development, validation, and building of the AI LLM Red Team scripts.
## 📁 Structure
- **`build/`** - Script generation and build tools (archived)
- **`validation/`** - Health checks, link validation, and linting
- **`install.sh`** - Automated installation script
## 🔧 Tools
### Installation
**`install.sh`** - Automated environment setup
```bash
cd /path/to/ai-llm-red-team-handbook/scripts
./tools/install.sh
```
This script:
- Checks Python 3.8+ installation
- Creates virtual environment
- Installs dependencies from `config/requirements.txt`
- Makes scripts executable
- Runs verification tests
### Validation Tools (`validation/`)
**`health_check.sh`** - System health and environment validation
```bash
./tools/validation/health_check.sh
```
**`validate_links.py`** - Link validation for documentation
```bash
python3 tools/validation/validate_links.py
```
**`global_lint.py`** - Code linting across all scripts
```bash
python3 tools/validation/global_lint.py
```
### Build Tools (`build/`)
**Archived build tools** used during initial script generation:
- `extract_code_blocks.py` - Extracts code from handbook chapters
- `generate_scripts.py` - Generates organized Python scripts
- `rename_scripts.py` - Renames scripts with descriptive names
- `code_catalog.json` - Catalog of all extracted code blocks (1.2MB)
- `rename_plan.json` - Script rename mapping
- `generated_scripts.txt` - List of generated scripts
These tools were used for the initial build and are kept for reference and potential regeneration.
## 🚀 Common Workflows
### Fresh Installation
```bash
# Run installation script
./tools/install.sh
# Activate environment
source venv/bin/activate
# Verify installation
python3 test_install.py
```
### Validation
```bash
# Run health check
./tools/validation/health_check.sh
# Validate documentation links
python3 tools/validation/validate_links.py
# Run linter
python3 tools/validation/global_lint.py
```
### Rebuild from Source (Advanced)
If you need to regenerate scripts from handbook chapters:
```bash
# Extract code blocks
python3 tools/build/extract_code_blocks.py
# Generate scripts
python3 tools/build/generate_scripts.py
# Review and rename
python3 tools/build/rename_scripts.py
```
## 📝 Notes
- Build tools are archived and typically not needed for normal use
- Validation tools can be run anytime to check system health
- Install script should be run once per setup or when dependencies change
---
**Last Updated:** 2026-01-07

View File

@@ -17,6 +17,8 @@ NC='\033[0m' # No Color
# Script directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Parent of the tools/ directory (i.e. the scripts/ root); was derived from
# the not-yet-set $PARENT_DIR itself, which always expanded to ".".
PARENT_DIR="$(dirname "$SCRIPT_DIR")"
VENV_DIR="$PARENT_DIR/venv"
VENV_DIR="$SCRIPT_DIR/venv"
# Print functions
@@ -111,9 +113,9 @@ main() {
# Step 4: Install Python dependencies
print_header "Step 4: Installing Python Dependencies"
if [ -f "$SCRIPT_DIR/requirements.txt" ]; then
if [ -f "$PARENT_DIR/config/requirements.txt" ]; then
print_info "Installing packages from requirements.txt..."
pip install --quiet -r "$SCRIPT_DIR/requirements.txt"
pip install --quiet -r "$PARENT_DIR/config/requirements.txt"
print_success "Python dependencies installed"
else
print_warning "requirements.txt not found, skipping dependency installation"
@@ -125,10 +127,10 @@ main() {
print_info "Setting executable permissions on scripts..."
# Make all Python scripts executable
find "$SCRIPT_DIR" -type f -name "*.py" -exec chmod +x {} \;
find "$PARENT_DIR" -type f -name "*.py" -exec chmod +x {} \;
# Make all shell scripts executable
find "$SCRIPT_DIR" -type f -name "*.sh" -exec chmod +x {} \;
find "$PARENT_DIR" -type f -name "*.sh" -exec chmod +x {} \;
print_success "Scripts are now executable"
@@ -261,8 +263,8 @@ EOF
print_header "Installation Log"
echo "Python version: $PYTHON_VERSION"
echo "Virtual env: $VENV_DIR"
echo "Script count: $(find "$SCRIPT_DIR" -name "*.py" -type f | wc -l) Python scripts"
echo "Script count: $(find "$SCRIPT_DIR" -name "*.sh" -type f | wc -l) Shell scripts"
echo "Script count: $(find "$PARENT_DIR" -name "*.py" -type f | wc -l) Python scripts"
echo "Script count: $(find "$PARENT_DIR" -name "*.sh" -type f | wc -l) Shell scripts"
echo ""
print_success "Setup complete! Happy red teaming! 🔴"

View File

@@ -1 +0,0 @@
"""Workflows module for AI LLM Red Teaming."""

View File

@@ -1,230 +0,0 @@
#!/usr/bin/env python3
"""
Full LLM Security Assessment Workflow
This workflow orchestrates a comprehensive security assessment combining
reconnaissance, prompt injection, data extraction, and exploitation techniques.
Usage:
python3 workflows/full_assessment.py --target https://api.example.com --output report.json
"""
import argparse
import json
import sys
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional

# Add parent to path for imports
sys.path.append(str(Path(__file__).parent.parent))

from utils.logging_config import setup_logging
from utils.validators import validate_url, validate_file_path, ValidationError
from utils.constants import (
    VERSION,
    EXIT_SUCCESS,
    EXIT_ERROR,
    EXIT_INVALID_INPUT,
    SECURITY_NOTICE,
    PROGRESS_SYMBOLS
)
logger = setup_logging(__name__)
@dataclass
class PhaseResult:
    """Result of a single assessment phase.

    NOTE(review): `dataclass` is used here but `from dataclasses import
    dataclass` is missing from this module's imports — confirm and add it.
    NOTE(review): this class appears unused below (run_phase builds plain
    dicts instead) — confirm before relying on or removing it.
    """
    phase_name: str  # Human-readable phase label
    success: bool  # Whether the phase completed without errors
    scripts_run: int  # Number of scripts executed in this phase
    duration_seconds: float  # Wall-clock duration of the phase
    findings: List[Dict[str, Any]]  # Structured findings produced by the phase
    errors: List[str]  # Error messages collected during the phase
class AssessmentOrchestrator:
    """Orchestrates a full LLM security assessment.

    This is a planning scaffold: each phase records which scripts WOULD be
    run (status "planned") rather than executing them — see the TODO in
    ``run_phase``. Results accumulate in ``self.results`` and are emitted
    by ``phase_6_reporting``.
    """
    def __init__(
        self,
        target: str,
        output_file: Optional[Path] = None,
        verbose: bool = False
    ):
        """
        Initialize the assessment orchestrator.
        Args:
            target: Target LLM API URL
            output_file: Optional output file for results
            verbose: Enable verbose logging
        """
        self.target = target
        self.output_file = output_file
        self.verbose = verbose
        # Accumulated assessment state, serialized as the final JSON report.
        self.results = {
            'target': target,
            'timestamp': datetime.now().isoformat(),
            'phases': {}
        }
    def log(self, message):
        """Log message if verbose."""
        if self.verbose:
            print(f"[*] {message}")
    def run_phase(self, phase_name, description, scripts):
        """Run a phase of the assessment.

        Args:
            phase_name: Key under which results are stored in self.results.
            description: Human-readable phase description for the banner.
            scripts: Iterable of (script_path, purpose) pairs to record.

        Returns:
            List of per-script result dicts (currently all "planned").
        """
        print(f"\n{'='*60}")
        print(f"Phase: {phase_name}")
        print(f"Description: {description}")
        print(f"{'='*60}\n")
        phase_results = []
        for script_name, script_desc in scripts:
            self.log(f"Running: {script_name}")
            self.log(f"Purpose: {script_desc}")
            # TODO: Actually execute the scripts
            # For now, just record them
            phase_results.append({
                'script': script_name,
                'description': script_desc,
                'status': 'planned'
            })
        self.results['phases'][phase_name] = phase_results
        return phase_results
    def phase_1_reconnaissance(self):
        """Phase 1: Reconnaissance and fingerprinting."""
        scripts = [
            ('reconnaissance/chapter_31_ai_system_reconnaissance_01_reconnaissance.py',
             'LLM system fingerprinting'),
            ('reconnaissance/chapter_31_ai_system_reconnaissance_02_reconnaissance.py',
             'API discovery and enumeration'),
        ]
        return self.run_phase('Reconnaissance',
                              'Identify LLM type, version, and architecture',
                              scripts)
    def phase_2_prompt_injection(self):
        """Phase 2: Prompt injection testing."""
        scripts = [
            ('prompt_injection/chapter_14_prompt_injection_01_prompt_injection.py',
             'Basic prompt injection'),
            ('prompt_injection/chapter_14_prompt_injection_02_prompt_injection.py',
             'Context overflow attacks'),
            ('prompt_injection/chapter_14_prompt_injection_03_prompt_injection.py',
             'System prompt leakage'),
        ]
        return self.run_phase('Prompt Injection',
                              'Test prompt injection vulnerabilities',
                              scripts)
    def phase_3_data_extraction(self):
        """Phase 3: Data extraction attempts."""
        scripts = [
            ('data_extraction/chapter_15_data_leakage_and_extraction_01_data_extraction.py',
             'PII extraction'),
            ('data_extraction/chapter_15_data_leakage_and_extraction_02_data_extraction.py',
             'Training data extraction'),
            ('data_extraction/chapter_15_data_leakage_and_extraction_03_data_extraction.py',
             'Memory dump attempts'),
        ]
        return self.run_phase('Data Extraction',
                              'Attempt to extract sensitive data',
                              scripts)
    def phase_4_jailbreak(self):
        """Phase 4: Jailbreak attempts."""
        scripts = [
            ('jailbreak/chapter_16_jailbreaks_and_bypass_techniques_01_jailbreak.py',
             'Character roleplay bypass'),
            ('jailbreak/chapter_16_jailbreaks_and_bypass_techniques_02_jailbreak.py',
             'DAN techniques'),
        ]
        return self.run_phase('Jailbreak Testing',
                              'Test guardrail bypasses',
                              scripts)
    def phase_5_plugin_exploitation(self):
        """Phase 5: Plugin and API exploitation."""
        scripts = [
            ('plugin_exploitation/chapter_17_01_fundamentals_and_architecture_01_plugin_exploitation.py',
             'Plugin enumeration'),
            ('plugin_exploitation/chapter_17_02_api_authentication_and_authorization_01_plugin_exploitation.py',
             'Authentication bypass'),
            ('plugin_exploitation/chapter_17_04_api_exploitation_and_function_calling_01_plugin_exploitation.py',
             'Command injection'),
        ]
        return self.run_phase('Plugin Exploitation',
                              'Test plugin and API vulnerabilities',
                              scripts)
    def phase_6_reporting(self):
        """Phase 6: Generate final report.

        Writes self.results as JSON to output_file when one was given,
        otherwise prints the JSON to stdout.
        """
        print(f"\n{'='*60}")
        print("Generating Assessment Report")
        print(f"{'='*60}\n")
        if self.output_file:
            Path(self.output_file).write_text(json.dumps(self.results, indent=2))
            print(f"Report saved to: {self.output_file}")
        else:
            print(json.dumps(self.results, indent=2))
    def run_full_assessment(self):
        """Run the complete assessment workflow (all six phases, in order)."""
        print(f"\n{'#'*60}")
        print(f"# AI LLM Security Assessment")
        print(f"# Target: {self.target}")
        print(f"# Time: {self.results['timestamp']}")
        print(f"{'#'*60}\n")
        # Run all phases
        self.phase_1_reconnaissance()
        self.phase_2_prompt_injection()
        self.phase_3_data_extraction()
        self.phase_4_jailbreak()
        self.phase_5_plugin_exploitation()
        self.phase_6_reporting()
        print(f"\n{'='*60}")
        print("Assessment Complete")
        print(f"{'='*60}\n")
def main():
    """Command-line interface."""
    parser = argparse.ArgumentParser(
        description='Full LLM Security Assessment Workflow',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Run full assessment
  python3 workflows/full_assessment.py --target https://api.example.com
  # Save results to file
  python3 workflows/full_assessment.py --target https://api.example.com --output report.json
  # Verbose mode
  python3 workflows/full_assessment.py --target https://api.example.com --verbose
"""
    )
    parser.add_argument('--target', required=True, help='Target LLM API URL')
    parser.add_argument('--output', '-o', help='Output file for results (JSON)')
    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
    cli_args = parser.parse_args()

    # Hand everything to the orchestrator and run end-to-end.
    assessor = AssessmentOrchestrator(cli_args.target, cli_args.output, cli_args.verbose)
    assessor.run_full_assessment()
if __name__ == "__main__":
    main()

View File

@@ -1,58 +0,0 @@
#!/usr/bin/env python3
"""
Plugin Penetration Testing Workflow
Focused workflow for testing plugin and API vulnerabilities in LLM systems.
Covers authentication, authorization, command injection, and function hijacking.
Usage:
python3 workflows/plugin_pentest.py --target https://api.example.com --plugins weather,calculator
"""
import argparse
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
def main():
    """Plugin penetration testing workflow (template).

    Parses CLI options and prints the planned testing phases; the actual
    attack logic is intentionally left unimplemented.
    """
    parser = argparse.ArgumentParser(description='Plugin penetration testing workflow')
    parser.add_argument('--target', required=True, help='Target LLM API URL')
    parser.add_argument('--plugins', help='Comma-separated list of plugins to test')
    parser.add_argument('--auth-token', help='Authentication token (if required)')
    parser.add_argument('--output', '-o', help='Output report file')
    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
    args = parser.parse_args()

    selected_plugins = args.plugins.split(',') if args.plugins else []

    print("Plugin Penetration Testing Workflow")
    print(f"Target: {args.target}")
    print(f"Plugins: {', '.join(selected_plugins)}")

    # (title, steps) pairs keep the printed plan in one place.
    plan = [
        ("Phase 1: Plugin Discovery", [
            "  - Enumerating available plugins",
            "  - Analyzing plugin manifests",
            "  - Mapping function signatures",
        ]),
        ("Phase 2: Authentication Testing", [
            "  - Testing API key requirements",
            "  - Attempting token bypass",
            "  - Checking authorization boundaries",
        ]),
        ("Phase 3: Function Calling Exploitation", [
            "  - Testing command injection in plugins",
            "  - Attempting function hijacking",
            "  - Exploiting type confusion",
        ]),
        ("Phase 4: Third-Party Integration Testing", [
            "  - Analyzing external API calls",
            "  - Testing SSRF vulnerabilities",
            "  - Checking credential leakage",
        ]),
    ]
    for title, steps in plan:
        print(f"\n{title}")
        for step in steps:
            print(step)

    print("\n[!] This is a template workflow - implement actual testing logic")
if __name__ == "__main__":
    main()

View File

@@ -1,55 +0,0 @@
#!/usr/bin/env python3
"""
RAG-Focused Exploitation Workflow
Specialized workflow for attacking RAG (Retrieval Augmented Generation) systems.
Combines vector database poisoning, retrieval manipulation, and indirect injection.
Usage:
python3 workflows/rag_exploitation.py --target https://api.example.com --vector-db chromadb
"""
import argparse
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
def main():
    """RAG exploitation workflow (template).

    Parses CLI options and prints the planned attack phases; the actual
    attack logic is intentionally left unimplemented.
    """
    parser = argparse.ArgumentParser(description='RAG-focused exploitation workflow')
    parser.add_argument('--target', required=True, help='Target RAG API URL')
    parser.add_argument('--vector-db', choices=['chromadb', 'faiss', 'pinecone'], help='Vector database type')
    parser.add_argument('--poison-docs', help='Documents for poisoning attack')
    parser.add_argument('--output', '-o', help='Output report file')
    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
    args = parser.parse_args()

    print("RAG Exploitation Workflow")
    print(f"Target: {args.target}")
    print(f"Vector DB: {args.vector_db}")

    # (title, steps) pairs keep the printed plan in one place.
    plan = [
        ("Phase 1: RAG Architecture Reconnaissance", [
            "  - Identifying retrieval endpoints",
            "  - Analyzing embedding model",
            "  - Mapping vector database",
        ]),
        ("Phase 2: Vector Database Poisoning", [
            "  - Crafting poisoned documents",
            "  - Injecting malicious embeddings",
            "  - Testing retrieval manipulation",
        ]),
        ("Phase 3: Indirect Injection via RAG", [
            "  - Embedding hidden instructions",
            "  - Testing context injection",
            "  - Validating payload execution",
        ]),
        ("Phase 4: Data Extraction", [
            "  - Extracting indexed documents",
            "  - Leaking embedding vectors",
            "  - Recovering training data",
        ]),
    ]
    for title, steps in plan:
        print(f"\n{title}")
        for step in steps:
            print(step)

    print("\n[!] This is a template workflow - implement actual attack logic")
if __name__ == "__main__":
    main()