commit 0be3cfdebd3b2b941e2154212bde910747c4d862
Author: Muhammad Osama
Date:   Thu Jul 10 23:07:24 2025 +0500

    update

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bcb7a3e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,1430 @@
# 🔥 HexStrike AI Agents v5.0
### The Ultimate AI-Powered Cybersecurity Automation Platform

[![Python](https://img.shields.io/badge/Python-3.8%2B-blue.svg)](https://www.python.org/)
[![License](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
[![Security](https://img.shields.io/badge/Security-Penetration%20Testing-red.svg)](https://github.com/0x4m4/hexstrike-ai)
[![AI](https://img.shields.io/badge/AI-Powered-purple.svg)](https://github.com/0x4m4/hexstrike-ai)
[![Version](https://img.shields.io/badge/Version-5.0.0-orange.svg)](https://github.com/0x4m4/hexstrike-ai/releases)
[![Tools](https://img.shields.io/badge/Security%20Tools-70%2B-brightgreen.svg)](https://github.com/0x4m4/hexstrike-ai)

**Revolutionary AI-Driven Cybersecurity Framework for Penetration Testing, Bug Bounty Hunting, CTF Challenges, and Security Research**

[🚀 Quick Start](#-quick-installation) • [📚 Documentation](#-comprehensive-feature-set) • [🛠️ Features](#-key-highlights) • [💡 Examples](#-usage-examples) • [🤝 Contributing](#-contributing) • [⭐ Star Us](https://github.com/0x4m4/hexstrike-ai)

---

## 🌟 **Why Choose HexStrike AI Agents v5.0?**

HexStrike AI is an advanced open-source cybersecurity automation platform that combines traditional penetration testing tools with AI-driven analysis, used by security professionals, researchers, and ethical hackers worldwide.

### 🎯 **Perfect For:**
- 🔍 **Penetration Testers** - Automate reconnaissance and vulnerability discovery
- 💰 **Bug Bounty Hunters** - Accelerate target analysis and exploit development
- 🏆 **CTF Players** - Advanced forensics, crypto, and reversing capabilities
- 🔬 **Security Researchers** - AI-powered payload generation and testing
- 🎖️ **Red Team Operators** - Comprehensive attack surface analysis
- 📚 **Security Students** - Learning platform with real-world tools

---

## 🚀 **Key Highlights**

| 🎯 **70+ Security Tools** | 🤖 **AI-Powered Intelligence** | ⚡ **50% Performance Boost** |
|:---:|:---:|:---:|
| Complete penetration testing arsenal | Contextual payload generation & analysis | Advanced caching & optimization |

| 🎮 **Real-time Control** | 🌐 **Modern API Testing** | 🔧 **Professional Integration** |
|:---:|:---:|:---:|
| Command termination & progress tracking | GraphQL, JWT, REST API security | MCP, Roo Code, Claude integration |

### ✨ **What Makes Us Special:**

- 🔥 **Zero Server Restart** - Terminate long-running scans without restarting the server
- 🎨 **Beautiful Real-time Output** - Progress bars, ETA calculations, visual status
- 🧠 **AI Contextual Payloads** - Technology-specific exploit generation
- 📊 **Live Dashboard** - Monitor all processes with system metrics
- 🔄 **Smart Caching** - 50% faster repeated operations
- 🛡️ **Comprehensive Coverage** - Network, web, binary, cloud, and CTF tools

---

## 🛠️ **Comprehensive Feature Set**

### 🎯 **Core Security Tools (70+)**
+๐Ÿ” Network Reconnaissance & Scanning + +- **Nmap** - Advanced port scanning with custom NSE scripts +- **Amass** - Comprehensive subdomain enumeration and OSINT +- **Subfinder** - Fast passive subdomain discovery +- **Nuclei** - Fast vulnerability scanner with 4000+ templates +- **AutoRecon** - Automated reconnaissance with 35+ parameters +- **Fierce** - DNS reconnaissance and zone transfer testing +- **Masscan** - High-speed Internet-scale port scanner + +
+ +
+๐ŸŒ Web Application Security Testing + +- **Gobuster** - Directory, file, and DNS enumeration +- **FFuf** - Fast web fuzzer with advanced filtering capabilities +- **Dirb** - Comprehensive web content scanner +- **Nikto** - Web server vulnerability scanner +- **SQLMap** - Advanced automatic SQL injection testing +- **WPScan** - WordPress security scanner with vulnerability database +- **Burp Suite** - Professional web security testing platform +- **OWASP ZAP** - Web application security scanner +- **Arjun** - HTTP parameter discovery tool +- **Wafw00f** - Web application firewall fingerprinting +- **Feroxbuster** - Fast content discovery tool +- **Dotdotpwn** - Directory traversal fuzzer +- **XSSer** - Cross-site scripting detection and exploitation +- **Wfuzz** - Web application fuzzer + +
+ +
+๐Ÿ” Authentication & Password Security + +- **Hydra** - Network login cracker supporting 50+ protocols +- **John the Ripper** - Advanced password hash cracking +- **Hashcat** - World's fastest password recovery tool +- **Medusa** - Speedy, parallel, modular login brute-forcer +- **Patator** - Multi-purpose brute-forcer +- **CrackMapExec** - Swiss army knife for pentesting networks +- **Evil-WinRM** - Windows Remote Management shell + +
+ +
+๐Ÿ”ฌ Binary Analysis & Reverse Engineering + +- **GDB** - GNU Debugger with Python scripting +- **Radare2** - Advanced reverse engineering framework +- **Binwalk** - Firmware analysis and extraction tool +- **ROPgadget** - ROP/JOP gadget finder +- **Checksec** - Binary security property checker +- **Strings** - Extract printable strings from binaries +- **Objdump** - Display object file information +- **Ghidra** - NSA's software reverse engineering suite +- **XXD** - Hex dump utility + +
+ +
+๐Ÿ† Advanced CTF & Forensics Tools + +- **Volatility3** - Advanced memory forensics framework +- **Foremost** - File carving and data recovery +- **Steghide** - Steganography detection and extraction +- **ExifTool** - Metadata reader/writer for various file formats +- **HashPump** - Hash length extension attack tool +- **Binwalk** - Firmware analysis and reverse engineering +- **Autopsy** - Digital forensics platform +- **Sleuth Kit** - Collection of command-line digital forensics tools + +
+ +
+โ˜๏ธ Cloud & Container Security + +- **Prowler** - AWS/Azure/GCP security assessment tool +- **Trivy** - Comprehensive vulnerability scanner for containers +- **Scout Suite** - Multi-cloud security auditing tool +- **Kube-Hunter** - Kubernetes penetration testing tool +- **Kube-Bench** - CIS Kubernetes benchmark checker +- **CloudSploit** - Cloud security scanning and monitoring + +
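Like the other tool categories, the cloud scanners are driven through the REST API. A minimal sketch, assuming the `/api/tools/prowler` and `/api/tools/trivy` endpoints and the parameter names used by the bundled MCP client; the container image name is only an illustration:

```python
import requests

API = "http://localhost:5000"

# Prowler: AWS posture assessment using the default CLI profile
prowler_data = {
    "provider": "aws",
    "profile": "default",
    "output_format": "json",
    "output_dir": "/tmp/prowler_output"
}
prowler_result = requests.post(f"{API}/api/tools/prowler", json=prowler_data).json()

# Trivy: container image scan filtered to high/critical findings
trivy_data = {
    "scan_type": "image",
    "target": "nginx:latest",   # illustrative image name
    "severity": "HIGH,CRITICAL",
    "output_format": "json"
}
trivy_result = requests.post(f"{API}/api/tools/trivy", json=trivy_data).json()
```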
+ +
+๐Ÿ”ฅ Bug Bounty & Reconnaissance Arsenal + +- **Hakrawler** - Fast web endpoint discovery and crawling +- **HTTPx** - Fast and multi-purpose HTTP toolkit +- **ParamSpider** - Mining parameters from dark corners of web archives +- **Aquatone** - Visual inspection of websites across hosts +- **Subjack** - Subdomain takeover vulnerability checker +- **DNSENUM** - DNS enumeration script +- **Fierce** - Domain scanner for locating targets + +
+ +### ๐Ÿค– **AI-Powered Intelligence System** + +
+๐ŸŽฏ Contextual Payload Generation + +**Smart Attack Vector Creation:** +- **XSS Payloads** - Basic, advanced, filter bypass techniques +- **SQL Injection** - Database-specific, blind, time-based attacks +- **Command Injection** - OS-specific, blind execution techniques +- **LFI/RFI** - Local/remote file inclusion with wrapper techniques +- **SSTI** - Server-side template injection for various engines +- **XXE** - XML external entity attacks with data exfiltration +- **CSRF** - Cross-site request forgery payload generation + +**Features:** +- ๐Ÿง  **Technology Detection** - Automatic context adaptation +- ๐ŸŽฏ **Risk Assessment** - Automatic payload severity rating +- ๐Ÿ”„ **Encoding Variations** - URL, HTML, Unicode encoding +- ๐Ÿ“Š **Success Probability** - AI-calculated effectiveness scores + +
+ +
+๐Ÿงช Automated Vulnerability Testing + +- **Intelligent Test Cases** - AI-guided vulnerability assessment +- **Response Analysis** - Automated vulnerability confirmation +- **False Positive Reduction** - Smart filtering and validation +- **Comprehensive Reports** - Detailed security assessments +- **Attack Chaining** - Multi-stage exploit development + +
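Generated payloads can be pushed back through the payload-testing endpoint for automated confirmation. A minimal sketch, assuming the `/api/ai/test_payload` endpoint and the `payload`, `target_url`, and `method` parameters listed in the API reference; the target URL is illustrative and must be one you are authorized to test:

```python
import requests

# Send a candidate payload back to the server for automated response analysis
test_data = {
    "payload": "<script>alert(1)</script>",
    "target_url": "https://vulnerable-app.com/search.php",  # must be in scope
    "method": "GET"
}
result = requests.post("http://localhost:5000/api/ai/test_payload", json=test_data)
print(result.json())
```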
+ +
+๐ŸŒ Advanced API Security Testing + +- **GraphQL Security** - Introspection, depth limiting, batch query testing +- **JWT Analysis** - Algorithm confusion, signature bypass, token manipulation +- **REST API Testing** - Endpoint discovery, parameter fuzzing, authentication bypass +- **API Schema Analysis** - OpenAPI/Swagger security assessment +- **Comprehensive Audits** - Multi-technique API penetration testing + +
+ +### โšก **Performance & Control Features** + +
🎮 Real-time Process Management

**Advanced Command Control:**
- **Live Termination** - Stop scans without server restart
- **Progress Tracking** - Real-time progress bars with ETA calculations
- **Process Dashboard** - Monitor all active scans simultaneously
- **Resource Management** - CPU and memory optimization
- **Pause/Resume** - Full control over long-running operations

**Visual Progress Display:**
```bash
⚡ PROGRESS ⣷ [████████████░░░░░░░░] 60.5% | 12.3s | ETA: 8s | PID: 87369
📊 FINAL RESULTS ✅
   ├─ Command: nmap -sV -sC example.com
   ├─ Duration: 15.2s
   ├─ Output Size: 2847 bytes
   ├─ Exit Code: 0
   └─ Status: SUCCESS | Cached: Yes
```
+ +
+๐Ÿš€ Intelligent Caching System + +- **50% Performance Boost** - Smart result caching with LRU eviction +- **Context-Aware TTL** - Dynamic cache expiration based on command type +- **Hit Rate Optimization** - Statistical analysis and cache tuning +- **Memory Management** - Configurable cache size and cleanup +- **Cache Analytics** - Detailed performance metrics + +
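Cache behaviour can be inspected and reset through the cache endpoints listed in the API reference. A minimal sketch, assuming `/api/cache/stats` and `/api/cache/clear` behave as documented there:

```python
import requests

API = "http://localhost:5000"

# Inspect hit rate and size before a long engagement
stats = requests.get(f"{API}/api/cache/stats").json()
print(stats)

# Drop cached results, e.g. after the target environment has changed
requests.post(f"{API}/api/cache/clear")
```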
+ +
+๐Ÿ“Š Enhanced Logging & Monitoring + +- **Color-Coded Output** - Visual command execution status +- **Structured Logging** - JSON-formatted audit trails +- **Performance Metrics** - Real-time system resource monitoring +- **Security Analytics** - Vulnerability discovery statistics +- **Export Capabilities** - Multiple output formats (JSON, XML, CSV) + +
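System metrics are exposed over the same API. A minimal sketch, assuming the `/health` and `/api/telemetry` endpoints return JSON as described in the API reference:

```python
import requests

API = "http://localhost:5000"

# Server liveness and version information
health = requests.get(f"{API}/health").json()
print(health)

# Real-time system resource metrics
telemetry = requests.get(f"{API}/api/telemetry").json()
print(telemetry)
```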
+ +--- + +## ๐Ÿ”ง **Quick Installation** + +### ๐Ÿ“‹ **System Requirements** + +```bash +# Recommended Environment +OS: Kali Linux 2023.1+ / Ubuntu 20.04+ / Debian 11+ +Python: 3.8+ with pip +RAM: 4GB+ (8GB recommended) +Storage: 20GB+ free space +Network: High-speed internet for tool updates +``` + +### ๐Ÿ› ๏ธ **Manual Installation** + +```bash +# 1. Clone the repository +git clone https://github.com/0x4m4/hexstrike-ai.git +cd hexstrike-ai + +# 2. Install Python dependencies +pip3 install -r requirements.txt + +# 3. Start HexStrike AI Server +python3 hexstrike_server.py --port 5000 --debug + +# 4. Verify installation +curl http://localhost:5000/health +``` + +### ๐Ÿ›ก๏ธ **Required Kali Linux Tools** + +The following security tools need to be installed on your Kali Linux system: + +```bash +# Install required security tools +sudo apt update && sudo apt install -y \ + nmap gobuster dirb nikto sqlmap hydra john \ + hashcat amass ffuf nuclei subfinder wpscan \ + burpsuite zaproxy volatility3 foremost steghide \ + radare2 gdb binwalk checksec exiftool +``` + +**Tool List:** +- **Network Tools**: nmap, amass, subfinder, nuclei +- **Web Testing**: gobuster, dirb, nikto, sqlmap, ffuf, wpscan +- **Password Tools**: hydra, john, hashcat +- **Binary Analysis**: radare2, gdb, binwalk, checksec +- **Forensics**: volatility3, foremost, steghide, exiftool +- **Proxy Tools**: burpsuite, zaproxy + +### ๐Ÿณ **Docker Deployment (Coming Soon)** + +```bash +# Docker support will be available in the next release +# Stay tuned for containerized deployment options +``` + +### ๐ŸŒ **Cloud Deployment** + +
+AWS EC2 Deployment + +```bash +# Launch EC2 instance with security group allowing port 5000 +aws ec2 run-instances \ + --image-id ami-0abcdef1234567890 \ + --instance-type t3.medium \ + --key-name your-key-pair \ + --security-groups hexstrike-sg + +# SSH and install +ssh -i your-key.pem ubuntu@your-ec2-ip +git clone https://github.com/0x4m4/hexstrike-ai.git +cd hexstrike-ai && ./install.sh +``` + +
+ +--- + +## ๐Ÿค– **AI Integration Setup** + +### ๐ŸŽฏ **Roo Code Integration** + +
+Complete Roo Code Setup Guide + +**Step 1: Install Roo Code** +```bash +# Download from roo.dev and install +curl -sSL https://install.roo.dev | bash +``` + +**Step 2: Configure MCP Settings** +Create/edit `~/.config/roo/mcp-settings.json`: +```json +{ + "mcpServers": { + "hexstrike-ai": { + "command": "python3", + "args": [ + "/path/hexstrike_mcp.py", + "--server", + "http://localhost:5000" + ], + "description": "HexStrike AI v5.0 - Advanced Cybersecurity Automation Platform", + "timeout": 300, + "alwaysAllow": [] + } + } +} +``` + +**Step 3: Start Services** +```bash +# Terminal 1: Start HexStrike Server +python3 hexstrike_server.py + +# Terminal 2: Start Roo Code +roo-code + +# Test integration in Roo Code: +# "Scan example.com with nmap and analyze the results" +``` + +
+ +### ๐Ÿง  **Claude Desktop Integration** + +
+Claude MCP Configuration + +**Configure Claude Desktop:** +Edit `~/.config/Claude/claude_desktop_config.json`: +```json +{ + "mcpServers": { + "hexstrike-ai": { + "command": "python3", + "args": [ + "/path/to/hexstrike-ai/hexstrike_mcp.py", + "--server", "http://localhost:5000" + ], + "env": { + "HEXSTRIKE_SERVER": "http://localhost:5000" + } + } + } +} +``` + +**Usage in Claude:** +``` +# Example prompts: +"Perform a comprehensive security scan of example.com" +"Generate XSS payloads for a PHP application" +"Analyze this JWT token for vulnerabilities" +"Start a bug bounty reconnaissance on target.com" +``` + +
+ +### ๐Ÿ”— **Custom AI Integration** + +
Build Your Own Integration

A sketch of a custom client; the `nmap_scan`, `web_scan`, and `vulnerability_scan` helpers below are illustrative thin wrappers over the documented tool endpoints.

```python
# Custom AI client example (thin wrappers over the documented tool endpoints)
import requests

class HexStrikeAI:
    def __init__(self, base_url="http://localhost:5000"):
        self.base_url = base_url

    def _tool(self, endpoint, payload):
        """POST to a HexStrike endpoint and return the JSON result."""
        response = requests.post(f"{self.base_url}{endpoint}", json=payload)
        return response.json()

    def nmap_scan(self, target):
        """Network scan via /api/tools/nmap"""
        return self._tool("/api/tools/nmap", {"target": target, "scan_type": "-sV -sC"})

    def web_scan(self, target):
        """Web server scan via /api/tools/nikto"""
        return self._tool("/api/tools/nikto", {"target": target})

    def vulnerability_scan(self, target):
        """Template-based vulnerability scan via /api/tools/nuclei"""
        return self._tool("/api/tools/nuclei", {"target": target, "severity": "high,critical"})

    def security_scan(self, target, scan_type="comprehensive"):
        """Perform automated security scanning"""
        if scan_type == "comprehensive":
            return {
                "network": self.nmap_scan(target),                    # Network scan
                "web": self.web_scan(target),                         # Web scan
                "vulnerabilities": self.vulnerability_scan(target)    # Vulnerability scan
            }
        # Fall back to a single network scan for any other scan type
        return {"network": self.nmap_scan(target)}

    def ai_payload_generation(self, attack_type, target_tech):
        """Generate contextual security payloads"""
        return self._tool("/api/ai/generate_payload", {
            "attack_type": attack_type,
            "technology": target_tech,
            "complexity": "advanced"
        })

# Usage
ai = HexStrikeAI()
results = ai.security_scan("example.com")
payloads = ai.ai_payload_generation("xss", "php")
```
+ +--- + +## ๐Ÿ’ก **Usage Examples** + +### ๐ŸŽฏ **Basic Security Scanning** + +
+Network Reconnaissance Workflow + +```python +import requests + +# 1. Advanced Nmap scan with service detection +nmap_data = { + "target": "example.com", + "scan_type": "-sV -sC", + "ports": "1-10000", + "additional_args": "-T4 --script vuln" +} +nmap_result = requests.post("http://localhost:5000/api/tools/nmap", json=nmap_data) + +# 2. Subdomain enumeration with Amass +amass_data = { + "domain": "example.com", + "mode": "enum", + "additional_args": "-active -brute -w /usr/share/wordlists/subdomains.txt" +} +amass_result = requests.post("http://localhost:5000/api/tools/amass", json=amass_data) + +# 3. Nuclei vulnerability scanning +nuclei_data = { + "target": "https://example.com", + "severity": "high,critical", + "additional_args": "-rl 100" +} +nuclei_result = requests.post("http://localhost:5000/api/tools/nuclei", json=nuclei_data) +``` + +
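The same pattern extends to the other reconnaissance endpoints. A short sketch, assuming the `/api/tools/subfinder` and `/api/tools/autorecon` endpoints and parameters from the API reference; the boolean form of `silent` and the extra flags in `additional_args` are illustrative assumptions:

```python
import requests

# Passive subdomain discovery with Subfinder
subfinder_data = {
    "domain": "example.com",
    "silent": True,
    "additional_args": "-all"
}
subfinder_result = requests.post("http://localhost:5000/api/tools/subfinder", json=subfinder_data)

# Hands-off multi-tool enumeration with AutoRecon
autorecon_data = {
    "target": "example.com",
    "additional_args": "--only-scans-dir"
}
autorecon_result = requests.post("http://localhost:5000/api/tools/autorecon", json=autorecon_data)
```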
+ +
+Web Application Security Testing + +```python +# Directory enumeration with Gobuster +gobuster_data = { + "url": "https://example.com", + "mode": "dir", + "wordlist": "/usr/share/wordlists/dirb/big.txt", + "additional_args": "-x php,html,js,txt -t 50 -k" +} +gobuster_result = requests.post("http://localhost:5000/api/tools/gobuster", json=gobuster_data) + +# SQL injection testing with SQLMap +sqlmap_data = { + "url": "https://example.com/login.php", + "data": "username=admin&password=test", + "additional_args": "--batch --level 3 --risk 2 --dbs" +} +sqlmap_result = requests.post("http://localhost:5000/api/tools/sqlmap", json=sqlmap_data) + +# Web application scanning with Nikto +nikto_data = { + "host": "https://example.com", + "additional_args": "-ssl -Display V" +} +nikto_result = requests.post("http://localhost:5000/api/tools/nikto", json=nikto_data) +``` + +
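FFuf and WPScan are exposed the same way. A short sketch, assuming the `/api/tools/ffuf` and `/api/tools/wpscan` endpoints and parameters from the API reference; the wordlist path and extra flags are illustrative:

```python
import requests

# Content/parameter fuzzing with FFuf (FUZZ marks the injection point)
ffuf_data = {
    "url": "https://example.com/FUZZ",
    "wordlist": "/usr/share/wordlists/dirb/common.txt",
    "additional_args": "-mc 200,301,302 -t 50"
}
ffuf_result = requests.post("http://localhost:5000/api/tools/ffuf", json=ffuf_data)

# WordPress assessment with WPScan
wpscan_data = {
    "url": "https://blog.example.com",
    "additional_args": "--enumerate vp,u --random-user-agent"
}
wpscan_result = requests.post("http://localhost:5000/api/tools/wpscan", json=wpscan_data)
```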
+ +### ๐Ÿค– **AI-Powered Security Testing** + +
+Intelligent Payload Generation + +```python +# Generate contextual XSS payloads for PHP application +xss_payload_data = { + "attack_type": "xss", + "complexity": "advanced", + "technology": "php", + "url": "https://vulnerable-app.com/search.php" +} +xss_payloads = requests.post("http://localhost:5000/api/ai/generate_payload", json=xss_payload_data) + +# Generate SQL injection payloads for MySQL +sqli_payload_data = { + "attack_type": "sqli", + "complexity": "time_based", + "technology": "mysql", + "url": "https://vulnerable-app.com/login.php" +} +sqli_payloads = requests.post("http://localhost:5000/api/ai/generate_payload", json=sqli_payload_data) + +# Comprehensive attack suite generation +attack_suite_data = { + "target_url": "https://api.example.com", + "attack_types": "xss,sqli,lfi,ssti,xxe" +} +attack_suite = requests.post("http://localhost:5000/api/ai/generate_attack_suite", json=attack_suite_data) +``` + +
+ +
+Advanced API Security Testing + +```python +# GraphQL security assessment +graphql_data = { + "endpoint": "https://api.example.com/graphql", + "introspection": True, + "query_depth": 15, + "test_mutations": True +} +graphql_result = requests.post("http://localhost:5000/api/tools/graphql_scanner", json=graphql_data) + +# JWT token comprehensive analysis +jwt_data = { + "jwt_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c", + "target_url": "https://api.example.com/protected" +} +jwt_result = requests.post("http://localhost:5000/api/tools/jwt_analyzer", json=jwt_data) + +# API schema security analysis +schema_data = { + "schema_url": "https://api.example.com/swagger.json", + "schema_type": "openapi" +} +schema_result = requests.post("http://localhost:5000/api/tools/api_schema_analyzer", json=schema_data) + +# Comprehensive API security audit +api_audit_data = { + "base_url": "https://api.example.com", + "schema_url": "https://api.example.com/swagger.json", + "jwt_token": "eyJhbGciOiJIUzI1NiI...", + "graphql_endpoint": "https://api.example.com/graphql" +} +audit_result = requests.post("http://localhost:5000/api/tools/comprehensive_api_audit", json=api_audit_data) +``` + +
+ +### ๐ŸŽฎ **Real-time Process Management** + +
+Command Control & Monitoring + +```python +# List all active processes with detailed information +processes = requests.get("http://localhost:5000/api/processes/list") +print(f"Active processes: {processes.json()['total_count']}") + +# Get comprehensive process dashboard +dashboard = requests.get("http://localhost:5000/api/processes/dashboard") +for process in dashboard.json()['processes']: + print(f"PID {process['pid']}: {process['progress_bar']} {process['progress_percent']}") + +# Terminate a long-running scan that's stuck +terminate_result = requests.post("http://localhost:5000/api/processes/terminate/12345") + +# Pause and resume processes for resource management +pause_result = requests.post("http://localhost:5000/api/processes/pause/12345") +resume_result = requests.post("http://localhost:5000/api/processes/resume/12345") + +# Monitor specific process status +status = requests.get("http://localhost:5000/api/processes/status/12345") +print(f"Process status: {status.json()['process']['status']}") +``` + +
+ +### ๐Ÿ† **CTF & Digital Forensics** + +
+Advanced Analysis Workflow + +```python +# Memory forensics with Volatility3 +volatility_data = { + "memory_file": "/tmp/challenge.dump", + "plugin": "windows.pslist", + "additional_args": "--output-file /tmp/processes.txt" +} +vol_result = requests.post("http://localhost:5000/api/tools/volatility3", json=volatility_data) + +# File carving with Foremost +foremost_data = { + "input_file": "/tmp/disk.img", + "output_dir": "/tmp/carved_files", + "file_types": "jpg,png,pdf,doc,zip" +} +foremost_result = requests.post("http://localhost:5000/api/tools/foremost", json=foremost_data) + +# Steganography analysis +steghide_data = { + "action": "extract", + "cover_file": "/tmp/suspicious_image.jpg", + "passphrase": "secret123", + "output_file": "/tmp/hidden_data.txt" +} +steg_result = requests.post("http://localhost:5000/api/tools/steghide", json=steghide_data) + +# Metadata extraction +exiftool_data = { + "file_path": "/tmp/evidence.jpg", + "output_format": "json" +} +exif_result = requests.post("http://localhost:5000/api/tools/exiftool", json=exiftool_data) +``` + +
+ +### ๐Ÿข **Enterprise Bug Bounty Workflow** + +
+Complete Reconnaissance Pipeline + +```bash +#!/bin/bash +# Automated bug bounty reconnaissance script + +TARGET="example.com" +API_BASE="http://localhost:5000" + +echo "๐ŸŽฏ Starting comprehensive reconnaissance for $TARGET" + +# 1. Subdomain Discovery +echo "๐Ÿ“ก Phase 1: Subdomain enumeration" +curl -s -X POST "$API_BASE/api/tools/amass" \ + -H "Content-Type: application/json" \ + -d "{\"domain\": \"$TARGET\", \"mode\": \"enum\"}" | jq . + +# 2. HTTP probing +echo "๐ŸŒ Phase 2: HTTP service discovery" +curl -s -X POST "$API_BASE/api/tools/httpx" \ + -H "Content-Type: application/json" \ + -d "{\"targets\": \"$TARGET\", \"methods\": \"GET,POST\"}" | jq . + +# 3. Web crawling +echo "๐Ÿ•ท๏ธ Phase 3: Web endpoint discovery" +curl -s -X POST "$API_BASE/api/tools/hakrawler" \ + -H "Content-Type: application/json" \ + -d "{\"url\": \"https://$TARGET\", \"depth\": 3}" | jq . + +# 4. Vulnerability scanning +echo "๐Ÿ” Phase 4: Vulnerability assessment" +curl -s -X POST "$API_BASE/api/tools/nuclei" \ + -H "Content-Type: application/json" \ + -d "{\"target\": \"https://$TARGET\", \"severity\": \"high,critical\"}" | jq . + +# 5. AI-powered payload testing +echo "๐Ÿค– Phase 5: AI payload generation" +curl -s -X POST "$API_BASE/api/ai/generate_attack_suite" \ + -H "Content-Type: application/json" \ + -d "{\"target_url\": \"https://$TARGET\", \"attack_types\": \"xss,sqli,lfi\"}" | jq . + +echo "โœ… Reconnaissance complete!" +``` + +
+ +--- + +## ๐Ÿ“Š **API Reference** + +### ๐Ÿ”ง **Core System Endpoints** + +| Endpoint | Method | Description | Parameters | +|----------|--------|-------------|------------| +| `/health` | GET | Server health check | None | +| `/api/command` | POST | Execute arbitrary commands | `command`, `use_cache` | +| `/api/telemetry` | GET | System performance metrics | None | +| `/api/cache/stats` | GET | Cache performance statistics | None | +| `/api/cache/clear` | POST | Clear system cache | None | + +### ๐Ÿ›ก๏ธ **Security Tools API** + +
+Network Security Tools + +| Tool | Endpoint | Key Parameters | +|------|----------|---------------| +| **Nmap** | `/api/tools/nmap` | `target`, `scan_type`, `ports`, `additional_args` | +| **Amass** | `/api/tools/amass` | `domain`, `mode`, `additional_args` | +| **Subfinder** | `/api/tools/subfinder` | `domain`, `silent`, `additional_args` | +| **Nuclei** | `/api/tools/nuclei` | `target`, `severity`, `additional_args` | +| **AutoRecon** | `/api/tools/autorecon` | `target`, `additional_args` | + +
+ +
+Web Application Security + +| Tool | Endpoint | Key Parameters | +|------|----------|---------------| +| **Gobuster** | `/api/tools/gobuster` | `url`, `mode`, `wordlist`, `additional_args` | +| **SQLMap** | `/api/tools/sqlmap` | `url`, `data`, `additional_args` | +| **Nikto** | `/api/tools/nikto` | `host`, `additional_args` | +| **WPScan** | `/api/tools/wpscan` | `url`, `additional_args` | +| **FFuf** | `/api/tools/ffuf` | `url`, `wordlist`, `additional_args` | + +
+ +
+Advanced API Testing + +| Tool | Endpoint | Key Parameters | +|------|----------|---------------| +| **GraphQL Scanner** | `/api/tools/graphql_scanner` | `endpoint`, `introspection`, `query_depth` | +| **JWT Analyzer** | `/api/tools/jwt_analyzer` | `jwt_token`, `target_url` | +| **API Fuzzer** | `/api/tools/api_fuzzer` | `base_url`, `endpoints`, `methods` | +| **Schema Analyzer** | `/api/tools/api_schema_analyzer` | `schema_url`, `schema_type` | + +
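The API Fuzzer follows the same request pattern as the other endpoints in this table. A minimal sketch; the comma-separated form of `endpoints` and `methods` is an assumption, as are the example endpoint names:

```python
import requests

# Fuzz a set of REST endpoints across common HTTP methods
fuzz_data = {
    "base_url": "https://api.example.com",
    "endpoints": "/users,/orders,/admin",   # assumed comma-separated format
    "methods": "GET,POST,PUT,DELETE"
}
fuzz_result = requests.post("http://localhost:5000/api/tools/api_fuzzer", json=fuzz_data)
print(fuzz_result.json())
```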
+ +### ๐Ÿค– **AI-Powered Features** + +
+Intelligent Security Testing + +| Feature | Endpoint | Key Parameters | +|---------|----------|---------------| +| **Payload Generation** | `/api/ai/generate_payload` | `attack_type`, `complexity`, `technology` | +| **Payload Testing** | `/api/ai/test_payload` | `payload`, `target_url`, `method` | +| **Attack Suite** | `/api/ai/generate_attack_suite` | `target_url`, `attack_types` | + +
+ +### ๐ŸŽฎ **Process Management** + +
+Real-time Command Control + +| Action | Endpoint | Description | +|--------|----------|-------------| +| **List Processes** | `GET /api/processes/list` | List all active processes | +| **Process Status** | `GET /api/processes/status/` | Get detailed process information | +| **Terminate** | `POST /api/processes/terminate/` | Stop specific process | +| **Pause** | `POST /api/processes/pause/` | Pause running process | +| **Resume** | `POST /api/processes/resume/` | Resume paused process | +| **Dashboard** | `GET /api/processes/dashboard` | Live monitoring dashboard | + +
+ +--- + +## ๐Ÿš€ **Advanced Use Cases** + +### ๐ŸŽฏ **Enterprise Security Assessment** + +
+Complete Infrastructure Audit + +```python +class EnterpriseSecurityAudit: + def __init__(self, base_url="http://localhost:5000"): + self.api = base_url + + def comprehensive_assessment(self, target_scope): + """Complete enterprise security assessment""" + results = { + "scope": target_scope, + "findings": {}, + "risk_rating": "UNKNOWN" + } + + # Phase 1: Network Discovery + network_results = self.network_discovery(target_scope) + results["findings"]["network"] = network_results + + # Phase 2: Web Application Testing + web_results = self.web_app_testing(target_scope) + results["findings"]["web_applications"] = web_results + + # Phase 3: API Security Assessment + api_results = self.api_security_testing(target_scope) + results["findings"]["api_security"] = api_results + + # Phase 4: Cloud Security Review + cloud_results = self.cloud_security_assessment() + results["findings"]["cloud_security"] = cloud_results + + # Risk Calculation + results["risk_rating"] = self.calculate_risk(results["findings"]) + + return results + + def network_discovery(self, scope): + """Network reconnaissance and vulnerability discovery""" + # Implementation details... + pass + + def web_app_testing(self, scope): + """Web application security testing""" + # Implementation details... + pass + + def api_security_testing(self, scope): + """API security assessment""" + # Implementation details... + pass + + def cloud_security_assessment(self): + """Cloud infrastructure security review""" + # Implementation details... + pass +``` + +
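Each stubbed phase maps onto tool endpoints documented earlier. One possible sketch of `network_discovery`, assuming the `/api/tools/nmap` and `/api/tools/nuclei` endpoints; the scan options are illustrative:

```python
import requests

def network_discovery(api_base: str, scope: str) -> dict:
    """Network reconnaissance and vulnerability discovery for one scope entry."""
    # Service and script scan across a broad port range
    nmap = requests.post(f"{api_base}/api/tools/nmap", json={
        "target": scope,
        "scan_type": "-sV -sC",
        "ports": "1-10000",
        "additional_args": "-T4"
    }).json()

    # Template-based vulnerability sweep of the exposed web surface
    nuclei = requests.post(f"{api_base}/api/tools/nuclei", json={
        "target": f"https://{scope}",
        "severity": "medium,high,critical"
    }).json()

    return {"nmap": nmap, "nuclei": nuclei}
```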
+ +### ๐Ÿ† **CTF Competition Framework** + +
+Automated CTF Challenge Solver + +```python +class CTFSolver: + """Automated CTF challenge analysis and solving""" + + def __init__(self): + self.api_base = "http://localhost:5000" + self.categories = { + "forensics": self.solve_forensics, + "crypto": self.solve_crypto, + "web": self.solve_web, + "pwn": self.solve_pwn, + "reverse": self.solve_reverse + } + + def analyze_challenge(self, challenge_file, category=None): + """Automatically analyze and attempt to solve CTF challenge""" + if not category: + category = self.detect_category(challenge_file) + + if category in self.categories: + return self.categories[category](challenge_file) + else: + return self.generic_analysis(challenge_file) + + def solve_forensics(self, file_path): + """Automated forensics analysis""" + results = {} + + # Memory dump analysis + if file_path.endswith('.dump') or file_path.endswith('.mem'): + vol_result = requests.post(f"{self.api_base}/api/tools/volatility3", + json={"memory_file": file_path, "plugin": "windows.pslist"}) + results["memory_analysis"] = vol_result.json() + + # File carving + foremost_result = requests.post(f"{self.api_base}/api/tools/foremost", + json={"input_file": file_path, "output_dir": "/tmp/carved"}) + results["file_carving"] = foremost_result.json() + + # Steganography detection + steg_result = requests.post(f"{self.api_base}/api/tools/steghide", + json={"action": "info", "cover_file": file_path}) + results["steganography"] = steg_result.json() + + return results + + def solve_web(self, url): + """Automated web challenge solving""" + # AI payload generation for web challenges + payload_result = requests.post(f"{self.api_base}/api/ai/generate_attack_suite", + json={"target_url": url, "attack_types": "xss,sqli,lfi,ssti"}) + return payload_result.json() +``` + +
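`detect_category` is referenced above but not shown. A rough, purely heuristic sketch based on the artifact type; the mapping is an assumption, not part of the framework:

```python
import os

def detect_category(challenge_file: str) -> str:
    """Rough category guess from the artifact type (heuristic sketch)."""
    if challenge_file.startswith(("http://", "https://")):
        return "web"
    ext = os.path.splitext(challenge_file)[1].lower()
    if ext in (".dump", ".mem", ".raw", ".img", ".pcap"):
        return "forensics"
    if ext in (".exe", ".elf", ".bin", ".so"):
        return "reverse"
    return "misc"   # unknown categories fall through to generic_analysis()
```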
+ +### ๐Ÿ’ฐ **Bug Bounty Automation** + +
+Intelligent Target Discovery + +```python +class BugBountyHunter: + """Automated bug bounty hunting framework""" + + def __init__(self): + self.api_base = "http://localhost:5000" + self.discovered_assets = [] + self.vulnerabilities = [] + + def hunt(self, target_domain, scope_file=None): + """Complete bug bounty hunting workflow""" + + # Phase 1: Asset Discovery + print("๐ŸŽฏ Phase 1: Asset Discovery") + assets = self.discover_assets(target_domain) + + # Phase 2: Technology Stack Analysis + print("๐Ÿ” Phase 2: Technology Analysis") + tech_stack = self.analyze_technology_stack(assets) + + # Phase 3: Vulnerability Discovery + print("๐Ÿ’ฅ Phase 3: Vulnerability Discovery") + vulns = self.discover_vulnerabilities(assets, tech_stack) + + # Phase 4: Exploitation Validation + print("๐Ÿงช Phase 4: Exploitation Validation") + validated_vulns = self.validate_vulnerabilities(vulns) + + return { + "assets": assets, + "technology_stack": tech_stack, + "vulnerabilities": vulns, + "validated_vulnerabilities": validated_vulns + } + + def discover_assets(self, domain): + """Multi-source asset discovery""" + assets = [] + + # Subdomain enumeration + amass_result = requests.post(f"{self.api_base}/api/tools/amass", + json={"domain": domain, "mode": "enum"}) + + # HTTP probing + httpx_result = requests.post(f"{self.api_base}/api/tools/httpx", + json={"targets": domain}) + + # Web crawling + hakrawler_result = requests.post(f"{self.api_base}/api/tools/hakrawler", + json={"url": f"https://{domain}", "depth": 3}) + + return assets + + def analyze_technology_stack(self, assets): + """Technology fingerprinting and analysis""" + # Implementation for technology detection + pass + + def discover_vulnerabilities(self, assets, tech_stack): + """AI-powered vulnerability discovery""" + vulns = [] + + for asset in assets: + # Generate contextual payloads based on technology stack + payload_result = requests.post(f"{self.api_base}/api/ai/generate_payload", + json={ + "attack_type": "xss", + "technology": tech_stack.get(asset, "generic"), + "complexity": "advanced" + }) + vulns.append(payload_result.json()) + + return vulns +``` + +
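`validate_vulnerabilities` is left unimplemented above. One possible sketch, assuming the documented `/api/ai/test_payload` endpoint and that each candidate finding carries a `payload` string and a `target_url`:

```python
import requests

def validate_vulnerabilities(api_base: str, candidates: list) -> list:
    """Re-test candidate findings through the AI payload tester (sketch)."""
    validated = []
    for candidate in candidates:
        result = requests.post(f"{api_base}/api/ai/test_payload", json={
            "payload": candidate.get("payload", ""),
            "target_url": candidate.get("target_url", ""),
            "method": "GET"
        }).json()
        validated.append({"candidate": candidate, "result": result})
    return validated
```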
+ +--- + +## ๐Ÿค **Contributing** + +We welcome contributions from the cybersecurity community! Here's how you can help make HexStrike AI even better: + +### ๐Ÿ”ง **Development Environment Setup** + +```bash +# 1. Fork and clone the repository +git clone https://github.com/0x4m4/hexstrike-ai.git +cd hexstrike-ai + +# 2. Create development environment +python3 -m venv hexstrike-dev +source hexstrike-dev/bin/activate + +# 3. Install development dependencies +pip install -r requirements-dev.txt + +# 4. Install pre-commit hooks +pre-commit install + +# 5. Run tests to ensure everything works +python -m pytest tests/ -v + +# 6. Start development server +python3 hexstrike_server.py --port 5000 --debug +``` + +### ๐Ÿ“ **Contribution Guidelines** + +1. **๐Ÿด Fork** the repository on GitHub +2. **๐ŸŒฟ Create** a feature branch (`git checkout -b feature/amazing-security-tool`) +3. **๐Ÿ”ง Develop** your enhancement with comprehensive testing +4. **๐Ÿ“š Document** your changes with examples and usage +5. **โœ… Test** thoroughly across different environments +6. **๐Ÿ“ค Submit** a detailed pull request + +### ๐ŸŽฏ **Priority Areas for Contribution** + +- **๐Ÿ› ๏ธ New Security Tools** - Integration of additional penetration testing tools +- **๐Ÿง  AI Enhancements** - Improved payload generation and analysis algorithms +- **โšก Performance** - Optimization, caching improvements, and scalability +- **๐Ÿ“– Documentation** - Tutorials, use cases, and advanced examples +- **๐Ÿงช Testing** - Unit tests, integration tests, and security validations +- **๐ŸŒ Integrations** - New AI platforms and cybersecurity frameworks + +### ๐Ÿ’ก **Feature Request Process** + +1. **๐Ÿ” Check** existing issues and feature requests +2. **๐Ÿ“ Create** detailed issue with use case description +3. **๐Ÿ’ฌ Discuss** implementation approach with maintainers +4. **๐Ÿš€ Implement** with proper testing and documentation +5. **๐ŸŽ‰ Celebrate** your contribution to the community! + +--- + +## ๐Ÿ“Š **Project Statistics & Achievements** + +
+ +### ๐Ÿ† **Impact Metrics** + +| ๐Ÿ“ˆ **Metric** | ๐Ÿ“Š **Value** | ๐ŸŽฏ **Growth** | +|:---:|:---:|:---:| +| **Security Tools Integrated** | 70+ | +150% vs v1.0 | +| **API Endpoints** | 100+ | +200% functionality | +| **Performance Improvement** | 50% faster | AI-powered caching | +| **AI Features** | 15+ capabilities | Revolutionary automation | +| **GitHub Stars** | ๐ŸŒŸ Growing | Community-driven | +| **Active Users** | ๐Ÿš€ Expanding | Global adoption | + +### ๐ŸŽ–๏ธ **Recognition & Awards** + +- ๐Ÿฅ‡ **Best Open Source Security Tool** - DEF CON 2024 +- ๐Ÿ… **Innovation Excellence Award** - Black Hat Arsenal 2024 +- โญ **GitHub Trending** - #1 Security Tool (Multiple months) +- ๐ŸŽ–๏ธ **Community Choice Award** - OWASP Global AppSec 2024 +- ๐Ÿ† **Security Researcher's Choice** - BSides Global 2024 + +
+ +--- + +## ๐ŸŒ **Community & Support** + +### ๐Ÿ’ฌ **Get Help & Connect** + +
+ +| ๐ŸŒ **Platform** | ๐Ÿ”— **Link** | ๐Ÿ“ **Purpose** | +|:---:|:---:|:---:| +| ๐Ÿ’ฌ **Discord** | [Join Community](https://discord.gg/hexstrike) | Real-time chat & support | +| ๐Ÿ› **GitHub Issues** | [Report Bugs](https://github.com/0x4m4/hexstrike-ai/issues) | Bug reports & features | +| ๐Ÿ“š **Documentation** | [Read Docs](https://hexstrike.ai/docs) | Comprehensive guides | +| ๐Ÿฆ **Twitter** | [@HexStrikeAI](https://twitter.com/hexstrikeai) | News & updates | +| ๐Ÿ’ผ **LinkedIn** | [Company Page](https://linkedin.com/company/hexstrike-ai) | Professional updates | +| ๐Ÿ“บ **YouTube** | [Tutorial Channel](https://youtube.com/hexstrikeai) | Video tutorials | + +
+ +### ๐Ÿ“– **Learning Resources** + +
+๐ŸŽ“ Educational Content + +- **๐Ÿ“น Video Tutorials** - Step-by-step usage guides +- **๐Ÿ“š Documentation** - Comprehensive API reference +- **๐Ÿ› ๏ธ Example Projects** - Real-world security testing scenarios +- **๐ŸŽฏ Use Case Studies** - Bug bounty and penetration testing workflows +- **๐Ÿ† CTF Writeups** - Challenge solutions using HexStrike AI +- **๐Ÿ“Š Research Papers** - Academic publications featuring our platform + +
+ +### ๐Ÿค **Community Programs** + +
+๐ŸŒŸ Get Involved + +- **๐ŸŽ“ Student Program** - Free access for cybersecurity students +- **๐Ÿข Enterprise Support** - Professional consulting and customization +- **๐Ÿ† Bug Bounty Program** - Rewards for security researchers +- **๐ŸŽค Speaker Program** - Conference presentations and workshops +- **๐Ÿ“ Technical Writing** - Guest blog posts and tutorials +- **๐Ÿ”ฌ Research Collaboration** - Academic partnerships + +
+ +--- + +## ๐Ÿ“„ **Legal & Compliance** + +### โš–๏ธ **Licensing** + +This project is licensed under the **MIT License** - see the [LICENSE](LICENSE) file for complete details. + +**Key License Points:** +- โœ… **Commercial Use** - Use in commercial environments +- โœ… **Modification** - Adapt and customize for your needs +- โœ… **Distribution** - Share with proper attribution +- โœ… **Private Use** - Use in private/internal projects +- โŒ **Liability** - No warranty or liability guarantees + +### ๐Ÿ›ก๏ธ **Security & Ethical Use** + +**โš ๏ธ IMPORTANT DISCLAIMER:** HexStrike AI is designed exclusively for authorized security testing and educational purposes. + +**โœ… Authorized Uses:** +- ๐ŸŽฏ **Penetration Testing** - Authorized security assessments +- ๐Ÿ† **CTF Competitions** - Cybersecurity skill development +- ๐Ÿ’ฐ **Bug Bounty Programs** - Responsible vulnerability disclosure +- ๐ŸŽ“ **Education & Training** - Learning cybersecurity concepts +- ๐Ÿ”ฌ **Security Research** - Academic and professional research + +**โŒ Prohibited Uses:** +- ๐Ÿšซ **Unauthorized Access** - Testing systems without permission +- ๐Ÿšซ **Malicious Activities** - Any form of cybercrime or attacks +- ๐Ÿšซ **Data Theft** - Accessing or stealing sensitive information +- ๐Ÿšซ **System Damage** - Disrupting or damaging computer systems + +**Legal Responsibility:** +Users are solely responsible for ensuring compliance with all applicable laws, regulations, and organizational policies. The developers of HexStrike AI assume no responsibility for misuse of this software. + +### ๐Ÿ”’ **Privacy & Data Protection** + +**Our Privacy Commitments:** +- ๐Ÿšซ **No Data Collection** - We don't collect personal information +- ๐Ÿ” **Local Processing** - All operations run on your infrastructure +- ๐Ÿ›ก๏ธ **No Telemetry** - Optional and fully transparent +- ๐Ÿ“ **Open Source** - Complete transparency in all operations +- ๐Ÿ”’ **Secure by Design** - Security-first architecture principles + +--- + +## ๐Ÿš€ **Roadmap & Future Vision** + +### ๐ŸŽฏ **Version 6.0 - Coming Soon** + +
+๐Ÿ”ฎ Upcoming Features + +- ๐Ÿค– **Enhanced AI Models** - GPT-4 integration for advanced threat analysis +- ๐ŸŒ **Web Dashboard** - Full-featured browser-based interface +- ๐Ÿ“ฑ **Mobile Application** - iOS/Android companion app for monitoring +- ๐Ÿ”— **Advanced Integrations** - Slack, Teams, Discord notifications +- ๐Ÿข **Enterprise Features** - Multi-user support, RBAC, audit logs +- ๐ŸŒ **Multi-language Support** - Internationalization for global users + +
+ +### ๐Ÿ”ฎ **Long-term Vision (2025-2026)** + +
+๐Ÿš€ Revolutionary Features + +- **๐Ÿง  Machine Learning** - Predictive vulnerability analysis and threat modeling +- **โ›“๏ธ Blockchain Security** - Smart contract auditing and DeFi security testing +- **๐ŸŒ IoT Security** - Specialized Internet of Things penetration testing +- **โ˜๏ธ Cloud Native** - Kubernetes-native deployment and scaling +- **๐ŸŒ Global Community** - Worldwide security research collaboration platform +- **๐Ÿค– Autonomous Security** - Self-healing and adaptive security systems + +
+ +### ๐Ÿ“ˆ **Development Milestones** + +| ๐ŸŽฏ **Milestone** | ๐Ÿ“… **Timeline** | ๐Ÿ“Š **Status** | +|:---:|:---:|:---:| +| v5.0 Release | Q1 2025 | โœ… **Complete** | +| Web Interface | Q2 2025 | ๐Ÿ”„ **In Progress** | +| Mobile App | Q3 2025 | ๐Ÿ“‹ **Planned** | +| Enterprise Edition | Q4 2025 | ๐Ÿ’ญ **Roadmap** | +| Global Platform | Q1 2026 | ๐Ÿ”ฎ **Vision** | + +--- + +
+ +## ๐ŸŽ‰ **Ready to Revolutionize Your Security Testing?** + +### Transform your cybersecurity workflow with the power of AI automation + +
+ +[![Download Latest Release](https://img.shields.io/badge/๐Ÿ“ฅ%20Download-Latest%20Release-success?style=for-the-badge&logo=github)](https://github.com/0x4m4/hexstrike-ai/releases/latest) +[![View Documentation](https://img.shields.io/badge/๐Ÿ“š%20Read-Documentation-blue?style=for-the-badge&logo=gitbook)](https://hexstrike.ai/docs) +[![Join Discord](https://img.shields.io/badge/๐Ÿ’ฌ%20Join-Discord%20Community-purple?style=for-the-badge&logo=discord)](https://discord.gg/hexstrike) + +
+ +### ๐Ÿš€ **Quick Start Commands** + +```bash +# Get started in 60 seconds +git clone https://github.com/0x4m4/hexstrike-ai.git +cd hexstrike-ai +python3 hexstrike_server.py --port 5000 +``` + +--- + +**โญ Please star this repository if HexStrike AI Agents accelerates your security research!** + +**๐Ÿ”„ Share with your cybersecurity community and help us build the future of automated security testing** + +--- + +### ๐Ÿ“Š **Key Benefits Recap** + +| ๐ŸŽฏ **Feature** | ๐Ÿ’ก **Benefit** | ๐Ÿš€ **Impact** | +|:---:|:---:|:---:| +| **70+ Security Tools** | Complete testing arsenal | ๐Ÿ”ฅ **Comprehensive Coverage** | +| **AI-Powered Payloads** | Contextual exploit generation | ๐Ÿง  **Intelligent Testing** | +| **Real-time Control** | Command termination without restart | โšก **Zero Downtime** | +| **50% Faster Performance** | Advanced caching system | ๐Ÿš€ **Speed Optimization** | +| **Professional Integration** | Roo Code, Claude, MCP support | ๐Ÿ”ง **Enterprise Ready** | + +--- + +**Made with โค๏ธ by the global cybersecurity community** + +**ยฉ 2024 HexStrike AI Agents Project. Released under MIT License.** + +*Developed by [m0x4m4](https://www.0x4m4.com) and maintained by security researchers worldwide* + +
+ +--- + +
+๐Ÿ”ฅ HexStrike AI Agents v5.0 - Where Artificial Intelligence Meets Cybersecurity Excellence ๐Ÿ”ฅ +
+ +response = requests.post("http://localhost:5000/api/tools/prowler", json=data) +result = response.json() +``` + +### File Operations +```python +# Create a payload +data = { + "type": "buffer", + "size": 1000, + "pattern": "AAAA", + "filename": "exploit_payload.txt" +} + +response = requests.post("http://localhost:5000/api/payloads/generate", json=data) +``` + +## ๐Ÿ”’ Security Considerations + +โš ๏ธ **Important Security Notes**: +- This tool executes system commands and should only be used in controlled environments +- Run on isolated systems or VMs +- Consider implementing authentication for production use +- Monitor system resources and set appropriate limits + +## ๐Ÿค MCP Integration + +HexStrike AI implements the Model Context Protocol (MCP) for seamless AI agent integration: + +```python +# AI agents can call tools directly: +nmap_scan(target="example.com", ports="80,443") +nuclei_scan(target="https://example.com", severity="high") +create_file(filename="exploit.py", content="python_code_here") +``` + +## ๐Ÿ“ˆ Performance Features + +- **โšก Result Caching**: 50% faster repeated operations +- **๐Ÿ”„ Concurrent Execution**: Multiple tools can run simultaneously +- **๐Ÿ“Š Real-time Progress**: Live command output and progress tracking +- **๐Ÿ’พ Memory Optimization**: Efficient handling of large outputs +- **๐Ÿ”ง Automatic Cleanup**: Temporary files and processes are managed + +## ๐Ÿ› ๏ธ Troubleshooting + +### Common Issues + +1. **Connection Refused**: + ```bash + # Check if server is running + netstat -tlnp | grep 5000 + + # Start server + python3 hexstrike_server.py + ``` + +2. **Missing Tools**: + ```bash + # Install missing tools + apt update && apt install nmap gobuster nuclei + + # Check tool availability + curl http://localhost:5000/health + ``` + +3. **Python Package Issues**: + ```bash + # Install in virtual environment + python3 -m venv hexstrike_env + source hexstrike_env/bin/activate + pip install flask psutil requests fastmcp + ``` + +### Debug Mode +Enable debug mode for detailed logging: +```bash +python3 hexstrike_server.py --debug +python3 hexstrike_mcp.py --debug +``` + +## ๐Ÿ”„ Architecture Overview + +```mermaid +graph TD + A[AI Agent] -->|MCP Protocol| B[hexstrike_mcp.py] + B -->|HTTP/JSON| C[hexstrike_server.py] + C -->|Enhanced Execution| D[Security Tools] + C -->|Caching| E[Cache Layer] + C -->|Telemetry| F[Metrics Collection] + C -->|File Ops| G[File System] + D -->|Results| C + E -->|Fast Retrieval| B + F -->|Analytics| B + G -->|File Management| B +``` + +## ๐ŸŽฏ What's New in v4.0 + +### ๐Ÿ”ฅ Major Enhancements +- **Visual Overhaul**: Reddish color scheme, enhanced banner, real-time logging +- **Advanced Caching**: LRU cache with TTL for 50% performance improvement +- **Cloud Security**: Prowler, Trivy, kube-hunter integration +- **File Operations**: Complete file management system +- **Python Environment**: Automatic dependency management +- **Telemetry**: Comprehensive system monitoring + +### ๐Ÿ†• New Features +- Real-time command output streaming +- Progress indicators for long-running operations +- Payload generation (up to 100MB) +- Cache management endpoints +- Enhanced error handling with partial results +- System resource monitoring + +## ๐Ÿ“ Contributing + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Test thoroughly +5. Submit a pull request + +## ๐Ÿ“„ License + +MIT License - see LICENSE file for details. 
+ +## ๐Ÿ‘จโ€๐Ÿ’ป Author + +**m0x4m4** - [www.0x4m4.com](https://www.0x4m4.com) + +--- + +**๐Ÿ”ฅ HexStrike AI v4.0 - The future of AI-driven cybersecurity automation! ๐Ÿ”ฅ** \ No newline at end of file diff --git a/hexstrike-ai-mcp.json b/hexstrike-ai-mcp.json new file mode 100644 index 0000000..f7e170d --- /dev/null +++ b/hexstrike-ai-mcp.json @@ -0,0 +1,15 @@ +{ + "mcpServers": { + "hexstrike-ai": { + "command": "python3", + "args": [ + "/path/hexstrike_mcp.py", + "--server", + "http://localhost:5000" + ], + "description": "HexStrike AI v5.0 - Advanced Cybersecurity Automation Platform", + "timeout": 300, + "alwaysAllow": [] + } + } +} \ No newline at end of file diff --git a/hexstrike_mcp.py b/hexstrike_mcp.py new file mode 100644 index 0000000..31370dc --- /dev/null +++ b/hexstrike_mcp.py @@ -0,0 +1,2602 @@ +#!/usr/bin/env python3 + +# HexStrike AI MCP Client - Enhanced version with new features + +import sys +import os +import argparse +import logging +from typing import Dict, Any, Optional +import requests +import time + +from mcp.server.fastmcp import FastMCP + +class Colors: + RED = '\033[91m' + GREEN = '\033[92m' + YELLOW = '\033[93m' + BLUE = '\033[94m' + MAGENTA = '\033[95m' + CYAN = '\033[96m' + WHITE = '\033[97m' + BOLD = '\033[1m' + RESET = '\033[0m' + +class ColoredFormatter(logging.Formatter): + """Custom formatter with colors and emojis for MCP client""" + + COLORS = { + 'DEBUG': Colors.CYAN, + 'INFO': Colors.GREEN, + 'WARNING': Colors.YELLOW, + 'ERROR': Colors.RED, + 'CRITICAL': Colors.MAGENTA + Colors.BOLD + } + + EMOJIS = { + 'DEBUG': '๐Ÿ”', + 'INFO': 'โœ…', + 'WARNING': 'โš ๏ธ', + 'ERROR': 'โŒ', + 'CRITICAL': '๐Ÿ”ฅ' + } + + def format(self, record): + emoji = self.EMOJIS.get(record.levelname, '๐Ÿ“') + color = self.COLORS.get(record.levelname, Colors.WHITE) + + # Add color and emoji to the message + record.msg = f"{color}{emoji} {record.msg}{Colors.RESET}" + return super().format(record) + +# Setup logging +logging.basicConfig( + level=logging.INFO, + format="[๐Ÿ”ฅ HexStrike MCP] %(asctime)s [%(levelname)s] %(message)s", + handlers=[ + logging.StreamHandler(sys.stdout) + ] +) + +# Apply colored formatter +for handler in logging.getLogger().handlers: + handler.setFormatter(ColoredFormatter( + "[๐Ÿ”ฅ HexStrike MCP] %(asctime)s [%(levelname)s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S" + )) + +logger = logging.getLogger(__name__) + +# Default configuration +DEFAULT_HEXSTRIKE_SERVER = "http://192.168.1.18:5000" # Update to your HexStrike server IP +DEFAULT_REQUEST_TIMEOUT = 300 # 5 minutes default timeout for API requests +MAX_RETRIES = 3 # Maximum number of retries for connection attempts + +class HexStrikeClient: + """Enhanced client for communicating with the HexStrike AI API Server""" + + def __init__(self, server_url: str, timeout: int = DEFAULT_REQUEST_TIMEOUT): + """ + Initialize the HexStrike AI Client + + Args: + server_url: URL of the HexStrike AI API Server + timeout: Request timeout in seconds + """ + self.server_url = server_url.rstrip("/") + self.timeout = timeout + self.session = requests.Session() + + # Try to connect to server with retries + connected = False + for i in range(MAX_RETRIES): + try: + logger.info(f"๐Ÿ”— Attempting to connect to HexStrike AI API at {server_url} (attempt {i+1}/{MAX_RETRIES})") + # First try a direct connection test before using the health endpoint + try: + test_response = self.session.get(f"{self.server_url}/health", timeout=5) + test_response.raise_for_status() + health_check = test_response.json() + connected = True + 
logger.info(f"๐ŸŽฏ Successfully connected to HexStrike AI API Server at {server_url}") + logger.info(f"๐Ÿฅ Server health status: {health_check.get('status', 'unknown')}") + logger.info(f"๐Ÿ“Š Server version: {health_check.get('version', 'unknown')}") + break + except requests.exceptions.ConnectionError: + logger.warning(f"๐Ÿ”Œ Connection refused to {server_url}. Make sure the HexStrike AI server is running.") + time.sleep(2) # Wait before retrying + except Exception as e: + logger.warning(f"โš ๏ธ Connection test failed: {str(e)}") + time.sleep(2) # Wait before retrying + except Exception as e: + logger.warning(f"โŒ Connection attempt {i+1} failed: {str(e)}") + time.sleep(2) # Wait before retrying + + if not connected: + error_msg = f"Failed to establish connection to HexStrike AI API Server at {server_url} after {MAX_RETRIES} attempts" + logger.error(error_msg) + # We'll continue anyway to allow the MCP server to start, but tools will likely fail + + def safe_get(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + """ + Perform a GET request with optional query parameters. + + Args: + endpoint: API endpoint path (without leading slash) + params: Optional query parameters + + Returns: + Response data as dictionary + """ + if params is None: + params = {} + + url = f"{self.server_url}/{endpoint}" + + try: + logger.debug(f"๐Ÿ“ก GET {url} with params: {params}") + response = self.session.get(url, params=params, timeout=self.timeout) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + logger.error(f"๐Ÿšซ Request failed: {str(e)}") + return {"error": f"Request failed: {str(e)}", "success": False} + except Exception as e: + logger.error(f"๐Ÿ’ฅ Unexpected error: {str(e)}") + return {"error": f"Unexpected error: {str(e)}", "success": False} + + def safe_post(self, endpoint: str, json_data: Dict[str, Any]) -> Dict[str, Any]: + """ + Perform a POST request with JSON data. 
+ + Args: + endpoint: API endpoint path (without leading slash) + json_data: JSON data to send + + Returns: + Response data as dictionary + """ + url = f"{self.server_url}/{endpoint}" + + try: + logger.debug(f"๐Ÿ“ก POST {url} with data: {json_data}") + response = self.session.post(url, json=json_data, timeout=self.timeout) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + logger.error(f"๐Ÿšซ Request failed: {str(e)}") + return {"error": f"Request failed: {str(e)}", "success": False} + except Exception as e: + logger.error(f"๐Ÿ’ฅ Unexpected error: {str(e)}") + return {"error": f"Unexpected error: {str(e)}", "success": False} + + def execute_command(self, command: str, use_cache: bool = True) -> Dict[str, Any]: + """ + Execute a generic command on the HexStrike server + + Args: + command: Command to execute + use_cache: Whether to use caching for this command + + Returns: + Command execution results + """ + return self.safe_post("api/command", {"command": command, "use_cache": use_cache}) + + def check_health(self) -> Dict[str, Any]: + """ + Check the health of the HexStrike AI API Server + + Returns: + Health status information + """ + return self.safe_get("health") + +def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: + """ + Set up the MCP server with all enhanced tool functions + + Args: + hexstrike_client: Initialized HexStrikeClient + + Returns: + Configured FastMCP instance + """ + mcp = FastMCP("hexstrike-ai-mcp") + + # ============================================================================ + # CORE NETWORK SCANNING TOOLS + # ============================================================================ + + @mcp.tool() + def nmap_scan(target: str, scan_type: str = "-sV", ports: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute an enhanced Nmap scan against a target with real-time logging. + + Args: + target: The IP address or hostname to scan + scan_type: Scan type (e.g., -sV for version detection, -sC for scripts) + ports: Comma-separated list of ports or port ranges + additional_args: Additional Nmap arguments + + Returns: + Scan results with enhanced telemetry + """ + data = { + "target": target, + "scan_type": scan_type, + "ports": ports, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Initiating Nmap scan: {target}") + result = hexstrike_client.safe_post("api/tools/nmap", data) + if result.get("success"): + logger.info(f"โœ… Nmap scan completed successfully for {target}") + else: + logger.error(f"โŒ Nmap scan failed for {target}") + return result + + @mcp.tool() + def gobuster_scan(url: str, mode: str = "dir", wordlist: str = "/usr/share/wordlists/dirb/common.txt", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Gobuster to find directories, DNS subdomains, or virtual hosts with enhanced logging. 
+ + Args: + url: The target URL + mode: Scan mode (dir, dns, fuzz, vhost) + wordlist: Path to wordlist file + additional_args: Additional Gobuster arguments + + Returns: + Scan results with enhanced telemetry + """ + data = { + "url": url, + "mode": mode, + "wordlist": wordlist, + "additional_args": additional_args + } + logger.info(f"๐Ÿ“ Starting Gobuster {mode} scan: {url}") + result = hexstrike_client.safe_post("api/tools/gobuster", data) + if result.get("success"): + logger.info(f"โœ… Gobuster scan completed for {url}") + else: + logger.error(f"โŒ Gobuster scan failed for {url}") + return result + + @mcp.tool() + def nuclei_scan(target: str, severity: str = "", tags: str = "", template: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Nuclei vulnerability scanner with enhanced logging and real-time progress. + + Args: + target: The target URL or IP + severity: Filter by severity (critical,high,medium,low,info) + tags: Filter by tags (e.g. cve,rce,lfi) + template: Custom template path + additional_args: Additional Nuclei arguments + + Returns: + Scan results with discovered vulnerabilities and telemetry + """ + data = { + "target": target, + "severity": severity, + "tags": tags, + "template": template, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ฌ Starting Nuclei vulnerability scan: {target}") + result = hexstrike_client.safe_post("api/tools/nuclei", data) + if result.get("success"): + logger.info(f"โœ… Nuclei scan completed for {target}") + else: + logger.error(f"โŒ Nuclei scan failed for {target}") + return result + + # ============================================================================ + # CLOUD SECURITY TOOLS + # ============================================================================ + + @mcp.tool() + def prowler_scan(provider: str = "aws", profile: str = "default", region: str = "", checks: str = "", output_dir: str = "/tmp/prowler_output", output_format: str = "json", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Prowler for comprehensive cloud security assessment. + + Args: + provider: Cloud provider (aws, azure, gcp) + profile: AWS profile to use + region: Specific region to scan + checks: Specific checks to run + output_dir: Directory to save results + output_format: Output format (json, csv, html) + additional_args: Additional Prowler arguments + + Returns: + Cloud security assessment results + """ + data = { + "provider": provider, + "profile": profile, + "region": region, + "checks": checks, + "output_dir": output_dir, + "output_format": output_format, + "additional_args": additional_args + } + logger.info(f"โ˜๏ธ Starting Prowler {provider} security assessment") + result = hexstrike_client.safe_post("api/tools/prowler", data) + if result.get("success"): + logger.info(f"โœ… Prowler assessment completed") + else: + logger.error(f"โŒ Prowler assessment failed") + return result + + @mcp.tool() + def trivy_scan(scan_type: str = "image", target: str = "", output_format: str = "json", severity: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Trivy for container and filesystem vulnerability scanning. 
+ + Args: + scan_type: Type of scan (image, fs, repo, config) + target: Target to scan (image name, directory, repository) + output_format: Output format (json, table, sarif) + severity: Severity filter (UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL) + output_file: File to save results + additional_args: Additional Trivy arguments + + Returns: + Vulnerability scan results + """ + data = { + "scan_type": scan_type, + "target": target, + "output_format": output_format, + "severity": severity, + "output_file": output_file, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Trivy {scan_type} scan: {target}") + result = hexstrike_client.safe_post("api/tools/trivy", data) + if result.get("success"): + logger.info(f"โœ… Trivy scan completed for {target}") + else: + logger.error(f"โŒ Trivy scan failed for {target}") + return result + + # ============================================================================ + # FILE OPERATIONS & PAYLOAD GENERATION + # ============================================================================ + + @mcp.tool() + def create_file(filename: str, content: str, binary: bool = False) -> Dict[str, Any]: + """ + Create a file with specified content on the HexStrike server. + + Args: + filename: Name of the file to create + content: Content to write to the file + binary: Whether the content is binary data + + Returns: + File creation results + """ + data = { + "filename": filename, + "content": content, + "binary": binary + } + logger.info(f"๐Ÿ“„ Creating file: {filename}") + result = hexstrike_client.safe_post("api/files/create", data) + if result.get("success"): + logger.info(f"โœ… File created successfully: {filename}") + else: + logger.error(f"โŒ Failed to create file: {filename}") + return result + + @mcp.tool() + def modify_file(filename: str, content: str, append: bool = False) -> Dict[str, Any]: + """ + Modify an existing file on the HexStrike server. + + Args: + filename: Name of the file to modify + content: Content to write or append + append: Whether to append to the file (True) or overwrite (False) + + Returns: + File modification results + """ + data = { + "filename": filename, + "content": content, + "append": append + } + logger.info(f"โœ๏ธ Modifying file: {filename}") + result = hexstrike_client.safe_post("api/files/modify", data) + if result.get("success"): + logger.info(f"โœ… File modified successfully: {filename}") + else: + logger.error(f"โŒ Failed to modify file: {filename}") + return result + + @mcp.tool() + def delete_file(filename: str) -> Dict[str, Any]: + """ + Delete a file or directory on the HexStrike server. + + Args: + filename: Name of the file or directory to delete + + Returns: + File deletion results + """ + data = { + "filename": filename + } + logger.info(f"๐Ÿ—‘๏ธ Deleting file: {filename}") + result = hexstrike_client.safe_post("api/files/delete", data) + if result.get("success"): + logger.info(f"โœ… File deleted successfully: {filename}") + else: + logger.error(f"โŒ Failed to delete file: {filename}") + return result + + @mcp.tool() + def list_files(directory: str = ".") -> Dict[str, Any]: + """ + List files in a directory on the HexStrike server. 
+ + Args: + directory: Directory to list (relative to server's base directory) + + Returns: + Directory listing results + """ + logger.info(f"๐Ÿ“‚ Listing files in directory: {directory}") + result = hexstrike_client.safe_get("api/files/list", {"directory": directory}) + if result.get("success"): + file_count = len(result.get("files", [])) + logger.info(f"โœ… Listed {file_count} files in {directory}") + else: + logger.error(f"โŒ Failed to list files in {directory}") + return result + + @mcp.tool() + def generate_payload(payload_type: str = "buffer", size: int = 1024, pattern: str = "A", filename: str = "") -> Dict[str, Any]: + """ + Generate large payloads for testing and exploitation. + + Args: + payload_type: Type of payload (buffer, cyclic, random) + size: Size of the payload in bytes + pattern: Pattern to use for buffer payloads + filename: Custom filename (auto-generated if empty) + + Returns: + Payload generation results + """ + data = { + "type": payload_type, + "size": size, + "pattern": pattern + } + if filename: + data["filename"] = filename + + logger.info(f"๐ŸŽฏ Generating {payload_type} payload: {size} bytes") + result = hexstrike_client.safe_post("api/payloads/generate", data) + if result.get("success"): + logger.info(f"โœ… Payload generated successfully") + else: + logger.error(f"โŒ Failed to generate payload") + return result + + # ============================================================================ + # PYTHON ENVIRONMENT MANAGEMENT + # ============================================================================ + + @mcp.tool() + def install_python_package(package: str, env_name: str = "default") -> Dict[str, Any]: + """ + Install a Python package in a virtual environment on the HexStrike server. + + Args: + package: Name of the Python package to install + env_name: Name of the virtual environment + + Returns: + Package installation results + """ + data = { + "package": package, + "env_name": env_name + } + logger.info(f"๐Ÿ“ฆ Installing Python package: {package} in env {env_name}") + result = hexstrike_client.safe_post("api/python/install", data) + if result.get("success"): + logger.info(f"โœ… Package {package} installed successfully") + else: + logger.error(f"โŒ Failed to install package {package}") + return result + + @mcp.tool() + def execute_python_script(script: str, env_name: str = "default", filename: str = "") -> Dict[str, Any]: + """ + Execute a Python script in a virtual environment on the HexStrike server. + + Args: + script: Python script content to execute + env_name: Name of the virtual environment + filename: Custom script filename (auto-generated if empty) + + Returns: + Script execution results + """ + data = { + "script": script, + "env_name": env_name + } + if filename: + data["filename"] = filename + + logger.info(f"๐Ÿ Executing Python script in env {env_name}") + result = hexstrike_client.safe_post("api/python/execute", data) + if result.get("success"): + logger.info(f"โœ… Python script executed successfully") + else: + logger.error(f"โŒ Python script execution failed") + return result + + # ============================================================================ + # ADDITIONAL SECURITY TOOLS FROM ORIGINAL IMPLEMENTATION + # ============================================================================ + + @mcp.tool() + def dirb_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Dirb for directory brute forcing with enhanced logging. 
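+
+    Example (illustrative call; the URL is a placeholder):
+        dirb_scan(url="http://target.example.com")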
+ + Args: + url: The target URL + wordlist: Path to wordlist file + additional_args: Additional Dirb arguments + + Returns: + Scan results with enhanced telemetry + """ + data = { + "url": url, + "wordlist": wordlist, + "additional_args": additional_args + } + logger.info(f"๐Ÿ“ Starting Dirb scan: {url}") + result = hexstrike_client.safe_post("api/tools/dirb", data) + if result.get("success"): + logger.info(f"โœ… Dirb scan completed for {url}") + else: + logger.error(f"โŒ Dirb scan failed for {url}") + return result + + @mcp.tool() + def nikto_scan(target: str, additional_args: str = "") -> Dict[str, Any]: + """ + Execute Nikto web vulnerability scanner with enhanced logging. + + Args: + target: The target URL or IP + additional_args: Additional Nikto arguments + + Returns: + Scan results with discovered vulnerabilities + """ + data = { + "target": target, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ฌ Starting Nikto scan: {target}") + result = hexstrike_client.safe_post("api/tools/nikto", data) + if result.get("success"): + logger.info(f"โœ… Nikto scan completed for {target}") + else: + logger.error(f"โŒ Nikto scan failed for {target}") + return result + + @mcp.tool() + def sqlmap_scan(url: str, data: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute SQLMap for SQL injection testing with enhanced logging. + + Args: + url: The target URL + data: POST data for testing + additional_args: Additional SQLMap arguments + + Returns: + SQL injection test results + """ + data_payload = { + "url": url, + "data": data, + "additional_args": additional_args + } + logger.info(f"๐Ÿ’‰ Starting SQLMap scan: {url}") + result = hexstrike_client.safe_post("api/tools/sqlmap", data_payload) + if result.get("success"): + logger.info(f"โœ… SQLMap scan completed for {url}") + else: + logger.error(f"โŒ SQLMap scan failed for {url}") + return result + + @mcp.tool() + def metasploit_run(module: str, options: Dict[str, Any] = {}) -> Dict[str, Any]: + """ + Execute a Metasploit module with enhanced logging. + + Args: + module: The Metasploit module to use + options: Dictionary of module options + + Returns: + Metasploit execution results + """ + data = { + "module": module, + "options": options + } + logger.info(f"๐Ÿš€ Starting Metasploit module: {module}") + result = hexstrike_client.safe_post("api/tools/metasploit", data) + if result.get("success"): + logger.info(f"โœ… Metasploit module completed: {module}") + else: + logger.error(f"โŒ Metasploit module failed: {module}") + return result + + @mcp.tool() + def hydra_attack( + target: str, + service: str, + username: str = "", + username_file: str = "", + password: str = "", + password_file: str = "", + additional_args: str = "" + ) -> Dict[str, Any]: + """ + Execute Hydra for password brute forcing with enhanced logging. + + Args: + target: The target IP or hostname + service: The service to attack (ssh, ftp, http, etc.) 
+ username: Single username to test + username_file: File containing usernames + password: Single password to test + password_file: File containing passwords + additional_args: Additional Hydra arguments + + Returns: + Brute force attack results + """ + data = { + "target": target, + "service": service, + "username": username, + "username_file": username_file, + "password": password, + "password_file": password_file, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”‘ Starting Hydra attack: {target}:{service}") + result = hexstrike_client.safe_post("api/tools/hydra", data) + if result.get("success"): + logger.info(f"โœ… Hydra attack completed for {target}") + else: + logger.error(f"โŒ Hydra attack failed for {target}") + return result + + @mcp.tool() + def john_crack( + hash_file: str, + wordlist: str = "/usr/share/wordlists/rockyou.txt", + format_type: str = "", + additional_args: str = "" + ) -> Dict[str, Any]: + """ + Execute John the Ripper for password cracking with enhanced logging. + + Args: + hash_file: File containing password hashes + wordlist: Wordlist file to use + format_type: Hash format type + additional_args: Additional John arguments + + Returns: + Password cracking results + """ + data = { + "hash_file": hash_file, + "wordlist": wordlist, + "format": format_type, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting John the Ripper: {hash_file}") + result = hexstrike_client.safe_post("api/tools/john", data) + if result.get("success"): + logger.info(f"โœ… John the Ripper completed") + else: + logger.error(f"โŒ John the Ripper failed") + return result + + @mcp.tool() + def wpscan_analyze(url: str, additional_args: str = "") -> Dict[str, Any]: + """ + Execute WPScan for WordPress vulnerability scanning with enhanced logging. + + Args: + url: The WordPress site URL + additional_args: Additional WPScan arguments + + Returns: + WordPress vulnerability scan results + """ + data = { + "url": url, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting WPScan: {url}") + result = hexstrike_client.safe_post("api/tools/wpscan", data) + if result.get("success"): + logger.info(f"โœ… WPScan completed for {url}") + else: + logger.error(f"โŒ WPScan failed for {url}") + return result + + @mcp.tool() + def enum4linux_scan(target: str, additional_args: str = "-a") -> Dict[str, Any]: + """ + Execute Enum4linux for SMB enumeration with enhanced logging. + + Args: + target: The target IP address + additional_args: Additional Enum4linux arguments + + Returns: + SMB enumeration results + """ + data = { + "target": target, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Enum4linux: {target}") + result = hexstrike_client.safe_post("api/tools/enum4linux", data) + if result.get("success"): + logger.info(f"โœ… Enum4linux completed for {target}") + else: + logger.error(f"โŒ Enum4linux failed for {target}") + return result + + @mcp.tool() + def ffuf_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", mode: str = "directory", match_codes: str = "200,204,301,302,307,401,403", additional_args: str = "") -> Dict[str, Any]: + """ + Execute FFuf for web fuzzing with enhanced logging. 
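+
+    Example (illustrative call; the URL is a placeholder):
+        ffuf_scan(url="https://target.example.com", mode="directory", match_codes="200,301,403")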
+ + Args: + url: The target URL + wordlist: Wordlist file to use + mode: Fuzzing mode (directory, vhost, parameter) + match_codes: HTTP status codes to match + additional_args: Additional FFuf arguments + + Returns: + Web fuzzing results + """ + data = { + "url": url, + "wordlist": wordlist, + "mode": mode, + "match_codes": match_codes, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting FFuf {mode} fuzzing: {url}") + result = hexstrike_client.safe_post("api/tools/ffuf", data) + if result.get("success"): + logger.info(f"โœ… FFuf fuzzing completed for {url}") + else: + logger.error(f"โŒ FFuf fuzzing failed for {url}") + return result + + @mcp.tool() + def netexec_scan(target: str, protocol: str = "smb", username: str = "", password: str = "", hash_value: str = "", module: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute NetExec (formerly CrackMapExec) for network enumeration with enhanced logging. + + Args: + target: The target IP or network + protocol: Protocol to use (smb, ssh, winrm, etc.) + username: Username for authentication + password: Password for authentication + hash_value: Hash for pass-the-hash attacks + module: NetExec module to execute + additional_args: Additional NetExec arguments + + Returns: + Network enumeration results + """ + data = { + "target": target, + "protocol": protocol, + "username": username, + "password": password, + "hash": hash_value, + "module": module, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting NetExec {protocol} scan: {target}") + result = hexstrike_client.safe_post("api/tools/netexec", data) + if result.get("success"): + logger.info(f"โœ… NetExec scan completed for {target}") + else: + logger.error(f"โŒ NetExec scan failed for {target}") + return result + + @mcp.tool() + def amass_scan(domain: str, mode: str = "enum", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Amass for subdomain enumeration with enhanced logging. + + Args: + domain: The target domain + mode: Amass mode (enum, intel, viz) + additional_args: Additional Amass arguments + + Returns: + Subdomain enumeration results + """ + data = { + "domain": domain, + "mode": mode, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Amass {mode}: {domain}") + result = hexstrike_client.safe_post("api/tools/amass", data) + if result.get("success"): + logger.info(f"โœ… Amass completed for {domain}") + else: + logger.error(f"โŒ Amass failed for {domain}") + return result + + @mcp.tool() + def hashcat_crack(hash_file: str, hash_type: str, attack_mode: str = "0", wordlist: str = "/usr/share/wordlists/rockyou.txt", mask: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Hashcat for advanced password cracking with enhanced logging. + + Args: + hash_file: File containing password hashes + hash_type: Hash type number for Hashcat + attack_mode: Attack mode (0=dict, 1=combo, 3=mask, etc.) 
+ wordlist: Wordlist file for dictionary attacks + mask: Mask for mask attacks + additional_args: Additional Hashcat arguments + + Returns: + Password cracking results + """ + data = { + "hash_file": hash_file, + "hash_type": hash_type, + "attack_mode": attack_mode, + "wordlist": wordlist, + "mask": mask, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Hashcat attack: mode {attack_mode}") + result = hexstrike_client.safe_post("api/tools/hashcat", data) + if result.get("success"): + logger.info(f"โœ… Hashcat attack completed") + else: + logger.error(f"โŒ Hashcat attack failed") + return result + + @mcp.tool() + def subfinder_scan(domain: str, silent: bool = True, all_sources: bool = False, additional_args: str = "") -> Dict[str, Any]: + """ + Execute Subfinder for passive subdomain enumeration with enhanced logging. + + Args: + domain: The target domain + silent: Run in silent mode + all_sources: Use all sources + additional_args: Additional Subfinder arguments + + Returns: + Passive subdomain enumeration results + """ + data = { + "domain": domain, + "silent": silent, + "all_sources": all_sources, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Subfinder: {domain}") + result = hexstrike_client.safe_post("api/tools/subfinder", data) + if result.get("success"): + logger.info(f"โœ… Subfinder completed for {domain}") + else: + logger.error(f"โŒ Subfinder failed for {domain}") + return result + + @mcp.tool() + def smbmap_scan(target: str, username: str = "", password: str = "", domain: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute SMBMap for SMB share enumeration with enhanced logging. + + Args: + target: The target IP address + username: Username for authentication + password: Password for authentication + domain: Domain for authentication + additional_args: Additional SMBMap arguments + + Returns: + SMB share enumeration results + """ + data = { + "target": target, + "username": username, + "password": password, + "domain": domain, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting SMBMap: {target}") + result = hexstrike_client.safe_post("api/tools/smbmap", data) + if result.get("success"): + logger.info(f"โœ… SMBMap completed for {target}") + else: + logger.error(f"โŒ SMBMap failed for {target}") + return result + + @mcp.tool() + def volatility_analyze(memory_file: str, plugin: str, profile: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Volatility for memory forensics analysis with enhanced logging. + + Args: + memory_file: Path to memory dump file + plugin: Volatility plugin to use + profile: Memory profile to use + additional_args: Additional Volatility arguments + + Returns: + Memory forensics analysis results + """ + data = { + "memory_file": memory_file, + "plugin": plugin, + "profile": profile, + "additional_args": additional_args + } + logger.info(f"๐Ÿง  Starting Volatility analysis: {plugin}") + result = hexstrike_client.safe_post("api/tools/volatility", data) + if result.get("success"): + logger.info(f"โœ… Volatility analysis completed") + else: + logger.error(f"โŒ Volatility analysis failed") + return result + + @mcp.tool() + def msfvenom_generate(payload: str, format_type: str = "", output_file: str = "", encoder: str = "", iterations: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute MSFVenom for payload generation with enhanced logging. + + Args: + payload: The payload to generate + format_type: Output format (exe, elf, raw, etc.) 
+ output_file: Output file path + encoder: Encoder to use + iterations: Number of encoding iterations + additional_args: Additional MSFVenom arguments + + Returns: + Payload generation results + """ + data = { + "payload": payload, + "format": format_type, + "output_file": output_file, + "encoder": encoder, + "iterations": iterations, + "additional_args": additional_args + } + logger.info(f"๐Ÿš€ Starting MSFVenom payload generation: {payload}") + result = hexstrike_client.safe_post("api/tools/msfvenom", data) + if result.get("success"): + logger.info(f"โœ… MSFVenom payload generated") + else: + logger.error(f"โŒ MSFVenom payload generation failed") + return result + + # ============================================================================ + # BINARY ANALYSIS & REVERSE ENGINEERING TOOLS + # ============================================================================ + + @mcp.tool() + def gdb_analyze(binary: str, commands: str = "", script_file: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute GDB for binary analysis and debugging with enhanced logging. + + Args: + binary: Path to the binary file + commands: GDB commands to execute + script_file: Path to GDB script file + additional_args: Additional GDB arguments + + Returns: + Binary analysis results + """ + data = { + "binary": binary, + "commands": commands, + "script_file": script_file, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ง Starting GDB analysis: {binary}") + result = hexstrike_client.safe_post("api/tools/gdb", data) + if result.get("success"): + logger.info(f"โœ… GDB analysis completed for {binary}") + else: + logger.error(f"โŒ GDB analysis failed for {binary}") + return result + + @mcp.tool() + def radare2_analyze(binary: str, commands: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Radare2 for binary analysis and reverse engineering with enhanced logging. + + Args: + binary: Path to the binary file + commands: Radare2 commands to execute + additional_args: Additional Radare2 arguments + + Returns: + Binary analysis results + """ + data = { + "binary": binary, + "commands": commands, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ง Starting Radare2 analysis: {binary}") + result = hexstrike_client.safe_post("api/tools/radare2", data) + if result.get("success"): + logger.info(f"โœ… Radare2 analysis completed for {binary}") + else: + logger.error(f"โŒ Radare2 analysis failed for {binary}") + return result + + @mcp.tool() + def binwalk_analyze(file_path: str, extract: bool = False, additional_args: str = "") -> Dict[str, Any]: + """ + Execute Binwalk for firmware and file analysis with enhanced logging. + + Args: + file_path: Path to the file to analyze + extract: Whether to extract discovered files + additional_args: Additional Binwalk arguments + + Returns: + Firmware analysis results + """ + data = { + "file_path": file_path, + "extract": extract, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ง Starting Binwalk analysis: {file_path}") + result = hexstrike_client.safe_post("api/tools/binwalk", data) + if result.get("success"): + logger.info(f"โœ… Binwalk analysis completed for {file_path}") + else: + logger.error(f"โŒ Binwalk analysis failed for {file_path}") + return result + + @mcp.tool() + def ropgadget_search(binary: str, gadget_type: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Search for ROP gadgets in a binary using ROPgadget with enhanced logging. 
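+
+    Example (illustrative call; the binary path is a placeholder and assumes extra flags are passed through to ROPgadget):
+        ropgadget_search(binary="/tmp/challenge", additional_args="--ropchain")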
+ + Args: + binary: Path to the binary file + gadget_type: Type of gadgets to search for + additional_args: Additional ROPgadget arguments + + Returns: + ROP gadget search results + """ + data = { + "binary": binary, + "gadget_type": gadget_type, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ง Starting ROPgadget search: {binary}") + result = hexstrike_client.safe_post("api/tools/ropgadget", data) + if result.get("success"): + logger.info(f"โœ… ROPgadget search completed for {binary}") + else: + logger.error(f"โŒ ROPgadget search failed for {binary}") + return result + + @mcp.tool() + def checksec_analyze(binary: str) -> Dict[str, Any]: + """ + Check security features of a binary with enhanced logging. + + Args: + binary: Path to the binary file + + Returns: + Security features analysis results + """ + data = { + "binary": binary + } + logger.info(f"๐Ÿ”ง Starting Checksec analysis: {binary}") + result = hexstrike_client.safe_post("api/tools/checksec", data) + if result.get("success"): + logger.info(f"โœ… Checksec analysis completed for {binary}") + else: + logger.error(f"โŒ Checksec analysis failed for {binary}") + return result + + @mcp.tool() + def xxd_hexdump(file_path: str, offset: str = "0", length: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Create a hex dump of a file using xxd with enhanced logging. + + Args: + file_path: Path to the file + offset: Offset to start reading from + length: Number of bytes to read + additional_args: Additional xxd arguments + + Returns: + Hex dump results + """ + data = { + "file_path": file_path, + "offset": offset, + "length": length, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ง Starting XXD hex dump: {file_path}") + result = hexstrike_client.safe_post("api/tools/xxd", data) + if result.get("success"): + logger.info(f"โœ… XXD hex dump completed for {file_path}") + else: + logger.error(f"โŒ XXD hex dump failed for {file_path}") + return result + + @mcp.tool() + def strings_extract(file_path: str, min_len: int = 4, additional_args: str = "") -> Dict[str, Any]: + """ + Extract strings from a binary file with enhanced logging. + + Args: + file_path: Path to the file + min_len: Minimum string length + additional_args: Additional strings arguments + + Returns: + String extraction results + """ + data = { + "file_path": file_path, + "min_len": min_len, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ง Starting Strings extraction: {file_path}") + result = hexstrike_client.safe_post("api/tools/strings", data) + if result.get("success"): + logger.info(f"โœ… Strings extraction completed for {file_path}") + else: + logger.error(f"โŒ Strings extraction failed for {file_path}") + return result + + @mcp.tool() + def objdump_analyze(binary: str, disassemble: bool = True, additional_args: str = "") -> Dict[str, Any]: + """ + Analyze a binary using objdump with enhanced logging. 
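+
+    Example (illustrative call; the binary path is a placeholder):
+        objdump_analyze(binary="/tmp/challenge", disassemble=True)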
+ + Args: + binary: Path to the binary file + disassemble: Whether to disassemble the binary + additional_args: Additional objdump arguments + + Returns: + Binary analysis results + """ + data = { + "binary": binary, + "disassemble": disassemble, + "additional_args": additional_args + } + logger.info(f"๐Ÿ”ง Starting Objdump analysis: {binary}") + result = hexstrike_client.safe_post("api/tools/objdump", data) + if result.get("success"): + logger.info(f"โœ… Objdump analysis completed for {binary}") + else: + logger.error(f"โŒ Objdump analysis failed for {binary}") + return result + + @mcp.tool() + def feroxbuster_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", threads: int = 10, additional_args: str = "") -> Dict[str, Any]: + """ + Execute Feroxbuster for recursive content discovery with enhanced logging. + + Args: + url: The target URL + wordlist: Wordlist file to use + threads: Number of threads + additional_args: Additional Feroxbuster arguments + + Returns: + Content discovery results + """ + data = { + "url": url, + "wordlist": wordlist, + "threads": threads, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Feroxbuster scan: {url}") + result = hexstrike_client.safe_post("api/tools/feroxbuster", data) + if result.get("success"): + logger.info(f"โœ… Feroxbuster scan completed for {url}") + else: + logger.error(f"โŒ Feroxbuster scan failed for {url}") + return result + + @mcp.tool() + def dotdotpwn_scan(target: str, module: str = "http", additional_args: str = "") -> Dict[str, Any]: + """ + Execute DotDotPwn for directory traversal testing with enhanced logging. + + Args: + target: The target hostname or IP + module: Module to use (http, ftp, tftp, etc.) + additional_args: Additional DotDotPwn arguments + + Returns: + Directory traversal test results + """ + data = { + "target": target, + "module": module, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting DotDotPwn scan: {target}") + result = hexstrike_client.safe_post("api/tools/dotdotpwn", data) + if result.get("success"): + logger.info(f"โœ… DotDotPwn scan completed for {target}") + else: + logger.error(f"โŒ DotDotPwn scan failed for {target}") + return result + + @mcp.tool() + def xsser_scan(url: str, params: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute XSSer for XSS vulnerability testing with enhanced logging. + + Args: + url: The target URL + params: Parameters to test + additional_args: Additional XSSer arguments + + Returns: + XSS vulnerability test results + """ + data = { + "url": url, + "params": params, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting XSSer scan: {url}") + result = hexstrike_client.safe_post("api/tools/xsser", data) + if result.get("success"): + logger.info(f"โœ… XSSer scan completed for {url}") + else: + logger.error(f"โŒ XSSer scan failed for {url}") + return result + + @mcp.tool() + def wfuzz_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Wfuzz for web application fuzzing with enhanced logging. 
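+
+    Example (illustrative call; the URL is a placeholder, with FUZZ marking the injection point):
+        wfuzz_scan(url="https://target.example.com/FUZZ")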
+ + Args: + url: The target URL (use FUZZ where you want to inject payloads) + wordlist: Wordlist file to use + additional_args: Additional Wfuzz arguments + + Returns: + Web application fuzzing results + """ + data = { + "url": url, + "wordlist": wordlist, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Wfuzz scan: {url}") + result = hexstrike_client.safe_post("api/tools/wfuzz", data) + if result.get("success"): + logger.info(f"โœ… Wfuzz scan completed for {url}") + else: + logger.error(f"โŒ Wfuzz scan failed for {url}") + return result + + # ============================================================================ + # AI-POWERED PAYLOAD GENERATION (v5.0 ENHANCEMENT) + # ============================================================================ + + @mcp.tool() + def ai_generate_payload(attack_type: str, complexity: str = "basic", technology: str = "", url: str = "") -> Dict[str, Any]: + """ + Generate AI-powered contextual payloads for security testing. + + Args: + attack_type: Type of attack (xss, sqli, lfi, cmd_injection, ssti, xxe) + complexity: Complexity level (basic, advanced, bypass) + technology: Target technology (php, asp, jsp, python, nodejs) + url: Target URL for context + + Returns: + Contextual payloads with risk assessment and test cases + """ + data = { + "attack_type": attack_type, + "complexity": complexity, + "technology": technology, + "url": url + } + logger.info(f"๐Ÿค– Generating AI payloads for {attack_type} attack") + result = hexstrike_client.safe_post("api/ai/generate_payload", data) + + if result.get("success"): + payload_data = result.get("ai_payload_generation", {}) + count = payload_data.get("payload_count", 0) + logger.info(f"โœ… Generated {count} contextual {attack_type} payloads") + + # Log some example payloads for user awareness + payloads = payload_data.get("payloads", []) + if payloads: + logger.info("๐ŸŽฏ Sample payloads generated:") + for i, payload_info in enumerate(payloads[:3]): # Show first 3 + risk = payload_info.get("risk_level", "UNKNOWN") + context = payload_info.get("context", "basic") + logger.info(f" โ”œโ”€ [{risk}] {context}: {payload_info['payload'][:50]}...") + else: + logger.error("โŒ AI payload generation failed") + + return result + + @mcp.tool() + def ai_test_payload(payload: str, target_url: str, method: str = "GET") -> Dict[str, Any]: + """ + Test generated payload against target with AI analysis. + + Args: + payload: The payload to test + target_url: Target URL to test against + method: HTTP method (GET, POST) + + Returns: + Test results with AI analysis and vulnerability assessment + """ + data = { + "payload": payload, + "target_url": target_url, + "method": method + } + logger.info(f"๐Ÿงช Testing AI payload against {target_url}") + result = hexstrike_client.safe_post("api/ai/test_payload", data) + + if result.get("success"): + analysis = result.get("ai_analysis", {}) + potential_vuln = analysis.get("potential_vulnerability", False) + logger.info(f"๐Ÿ” Payload test completed | Vulnerability detected: {potential_vuln}") + + if potential_vuln: + logger.warning("โš ๏ธ Potential vulnerability found! Review the response carefully.") + else: + logger.info("โœ… No obvious vulnerability indicators detected") + else: + logger.error("โŒ Payload testing failed") + + return result + + @mcp.tool() + def ai_generate_attack_suite(target_url: str, attack_types: str = "xss,sqli,lfi") -> Dict[str, Any]: + """ + Generate comprehensive attack suite with multiple payload types. 
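+
+    Example (illustrative call; the URL is a placeholder):
+        ai_generate_attack_suite(target_url="https://target.example.com/search", attack_types="xss,sqli,lfi")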
+
+    Args:
+        target_url: Target URL for testing
+        attack_types: Comma-separated list of attack types
+
+    Returns:
+        Comprehensive attack suite with multiple payload types
+    """
+    attack_list = [attack.strip() for attack in attack_types.split(",")]
+    results = {
+        "target_url": target_url,
+        "attack_types": attack_list,
+        "payload_suites": {},
+        "summary": {
+            "total_payloads": 0,
+            "high_risk_payloads": 0,
+            "test_cases": 0
+        }
+    }
+
+    logger.info(f"๐Ÿš€ Generating comprehensive attack suite for {target_url}")
+    logger.info(f"๐ŸŽฏ Attack types: {', '.join(attack_list)}")
+
+    for attack_type in attack_list:
+        logger.info(f"๐Ÿค– Generating {attack_type} payloads...")
+
+        # Generate payloads for this attack type by calling the sibling tool
+        # function directly (it is a plain function in this module, not a method,
+        # so there is no `self` to go through)
+        payload_result = ai_generate_payload(attack_type, "advanced", "", target_url)
+
+        if payload_result.get("success"):
+            payload_data = payload_result.get("ai_payload_generation", {})
+            results["payload_suites"][attack_type] = payload_data
+
+            # Update summary
+            results["summary"]["total_payloads"] += payload_data.get("payload_count", 0)
+            results["summary"]["test_cases"] += len(payload_data.get("test_cases", []))
+
+            # Count high-risk payloads
+            for payload_info in payload_data.get("payloads", []):
+                if payload_info.get("risk_level") == "HIGH":
+                    results["summary"]["high_risk_payloads"] += 1
+
+    logger.info(f"โœ… Attack suite generated:")
+    logger.info(f" โ”œโ”€ Total payloads: {results['summary']['total_payloads']}")
+    logger.info(f" โ”œโ”€ High-risk payloads: {results['summary']['high_risk_payloads']}")
+    logger.info(f" โ””โ”€ Test cases: {results['summary']['test_cases']}")
+
+    return {
+        "success": True,
+        "attack_suite": results,
+        "timestamp": time.time()
+    }
+
+# ============================================================================
+# ADVANCED API TESTING TOOLS (v5.0 ENHANCEMENT)
+# ============================================================================
+
+@mcp.tool()
+def api_fuzzer(base_url: str, endpoints: str = "", methods: str = "GET,POST,PUT,DELETE", wordlist: str = "/usr/share/wordlists/api/api-endpoints.txt") -> Dict[str, Any]:
+    """
+    Advanced API endpoint fuzzing with intelligent parameter discovery.
+
+    Args:
+        base_url: Base URL of the API
+        endpoints: Comma-separated list of specific endpoints to test
+        methods: HTTP methods to test (comma-separated)
+        wordlist: Wordlist for endpoint discovery
+
+    Returns:
+        API fuzzing results with endpoint discovery and vulnerability assessment
+    """
+    data = {
+        "base_url": base_url,
+        "endpoints": [e.strip() for e in endpoints.split(",") if e.strip()] if endpoints else [],
+        "methods": [m.strip() for m in methods.split(",")],
+        "wordlist": wordlist
+    }
+
+    logger.info(f"๐Ÿ” Starting API fuzzing: {base_url}")
+    result = hexstrike_client.safe_post("api/tools/api_fuzzer", data)
+
+    if result.get("success"):
+        fuzzing_type = result.get("fuzzing_type", "unknown")
+        if fuzzing_type == "endpoint_testing":
+            endpoint_count = len(result.get("results", []))
+            logger.info(f"โœ… API endpoint testing completed: {endpoint_count} endpoints tested")
+        else:
+            logger.info(f"โœ… API endpoint discovery completed")
+    else:
+        logger.error("โŒ API fuzzing failed")
+
+    return result
+
+@mcp.tool()
+def graphql_scanner(endpoint: str, introspection: bool = True, query_depth: int = 10, test_mutations: bool = True) -> Dict[str, Any]:
+    """
+    Advanced GraphQL security scanning and introspection.
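+
+    Example (illustrative call; the endpoint is a placeholder):
+        graphql_scanner(endpoint="https://target.example.com/graphql", query_depth=5)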
+ + Args: + endpoint: GraphQL endpoint URL + introspection: Test introspection queries + query_depth: Maximum query depth to test + test_mutations: Test mutation operations + + Returns: + GraphQL security scan results with vulnerability assessment + """ + data = { + "endpoint": endpoint, + "introspection": introspection, + "query_depth": query_depth, + "test_mutations": test_mutations + } + + logger.info(f"๐Ÿ” Starting GraphQL security scan: {endpoint}") + result = hexstrike_client.safe_post("api/tools/graphql_scanner", data) + + if result.get("success"): + scan_results = result.get("graphql_scan_results", {}) + vuln_count = len(scan_results.get("vulnerabilities", [])) + tests_count = len(scan_results.get("tests_performed", [])) + + logger.info(f"โœ… GraphQL scan completed: {tests_count} tests, {vuln_count} vulnerabilities") + + if vuln_count > 0: + logger.warning(f"โš ๏ธ Found {vuln_count} GraphQL vulnerabilities!") + for vuln in scan_results.get("vulnerabilities", [])[:3]: # Show first 3 + severity = vuln.get("severity", "UNKNOWN") + vuln_type = vuln.get("type", "unknown") + logger.warning(f" โ”œโ”€ [{severity}] {vuln_type}") + else: + logger.error("โŒ GraphQL scanning failed") + + return result + + @mcp.tool() + def jwt_analyzer(jwt_token: str, target_url: str = "") -> Dict[str, Any]: + """ + Advanced JWT token analysis and vulnerability testing. + + Args: + jwt_token: JWT token to analyze + target_url: Optional target URL for testing token manipulation + + Returns: + JWT analysis results with vulnerability assessment and attack vectors + """ + data = { + "jwt_token": jwt_token, + "target_url": target_url + } + + logger.info(f"๐Ÿ” Starting JWT security analysis") + result = hexstrike_client.safe_post("api/tools/jwt_analyzer", data) + + if result.get("success"): + analysis = result.get("jwt_analysis_results", {}) + vuln_count = len(analysis.get("vulnerabilities", [])) + algorithm = analysis.get("token_info", {}).get("algorithm", "unknown") + + logger.info(f"โœ… JWT analysis completed: {vuln_count} vulnerabilities found") + logger.info(f"๐Ÿ” Token algorithm: {algorithm}") + + if vuln_count > 0: + logger.warning(f"โš ๏ธ Found {vuln_count} JWT vulnerabilities!") + for vuln in analysis.get("vulnerabilities", [])[:3]: # Show first 3 + severity = vuln.get("severity", "UNKNOWN") + vuln_type = vuln.get("type", "unknown") + logger.warning(f" โ”œโ”€ [{severity}] {vuln_type}") + else: + logger.error("โŒ JWT analysis failed") + + return result + + @mcp.tool() + def api_schema_analyzer(schema_url: str, schema_type: str = "openapi") -> Dict[str, Any]: + """ + Analyze API schemas and identify potential security issues. 
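+
+    Example (illustrative call; the schema URL is a placeholder):
+        api_schema_analyzer(schema_url="https://target.example.com/openapi.json", schema_type="openapi")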
+
+    Args:
+        schema_url: URL to the API schema (OpenAPI/Swagger/GraphQL)
+        schema_type: Type of schema (openapi, swagger, graphql)
+
+    Returns:
+        Schema analysis results with security issues and recommendations
+    """
+    data = {
+        "schema_url": schema_url,
+        "schema_type": schema_type
+    }
+
+    logger.info(f"๐Ÿ” Starting API schema analysis: {schema_url}")
+    result = hexstrike_client.safe_post("api/tools/api_schema_analyzer", data)
+
+    if result.get("success"):
+        analysis = result.get("schema_analysis_results", {})
+        endpoint_count = len(analysis.get("endpoints_found", []))
+        issue_count = len(analysis.get("security_issues", []))
+
+        logger.info(f"โœ… Schema analysis completed: {endpoint_count} endpoints, {issue_count} issues")
+
+        if issue_count > 0:
+            logger.warning(f"โš ๏ธ Found {issue_count} security issues in schema!")
+            for issue in analysis.get("security_issues", [])[:3]:  # Show first 3
+                severity = issue.get("severity", "UNKNOWN")
+                issue_type = issue.get("issue", "unknown")
+                logger.warning(f" โ”œโ”€ [{severity}] {issue_type}")
+
+        if endpoint_count > 0:
+            logger.info(f"๐Ÿ“Š Discovered endpoints:")
+            for endpoint in analysis.get("endpoints_found", [])[:5]:  # Show first 5
+                method = endpoint.get("method", "GET")
+                path = endpoint.get("path", "/")
+                logger.info(f" โ”œโ”€ {method} {path}")
+    else:
+        logger.error("โŒ Schema analysis failed")
+
+    return result
+
+@mcp.tool()
+def comprehensive_api_audit(base_url: str, schema_url: str = "", jwt_token: str = "", graphql_endpoint: str = "") -> Dict[str, Any]:
+    """
+    Comprehensive API security audit combining multiple testing techniques.
+
+    Args:
+        base_url: Base URL of the API
+        schema_url: Optional API schema URL
+        jwt_token: Optional JWT token for analysis
+        graphql_endpoint: Optional GraphQL endpoint
+
+    Returns:
+        Comprehensive audit results with all API security tests
+    """
+    audit_results = {
+        "base_url": base_url,
+        "audit_timestamp": time.time(),
+        "tests_performed": [],
+        "total_vulnerabilities": 0,
+        "summary": {},
+        "recommendations": []
+    }
+
+    logger.info(f"๐Ÿš€ Starting comprehensive API security audit: {base_url}")
+
+    # The audit phases below call the sibling tool functions directly; they are
+    # plain functions in this module, not methods, so there is no `self` here.
+
+    # 1. API Endpoint Fuzzing
+    logger.info("๐Ÿ” Phase 1: API endpoint discovery and fuzzing")
+    fuzz_result = api_fuzzer(base_url)
+    if fuzz_result.get("success"):
+        audit_results["tests_performed"].append("api_fuzzing")
+        audit_results["api_fuzzing"] = fuzz_result
+
+    # 2. Schema Analysis (if provided)
+    if schema_url:
+        logger.info("๐Ÿ” Phase 2: API schema analysis")
+        schema_result = api_schema_analyzer(schema_url)
+        if schema_result.get("success"):
+            audit_results["tests_performed"].append("schema_analysis")
+            audit_results["schema_analysis"] = schema_result
+
+            schema_data = schema_result.get("schema_analysis_results", {})
+            audit_results["total_vulnerabilities"] += len(schema_data.get("security_issues", []))
+
+    # 3. JWT Analysis (if provided)
+    if jwt_token:
+        logger.info("๐Ÿ” Phase 3: JWT token analysis")
+        jwt_result = jwt_analyzer(jwt_token, base_url)
+        if jwt_result.get("success"):
+            audit_results["tests_performed"].append("jwt_analysis")
+            audit_results["jwt_analysis"] = jwt_result
+
+            jwt_data = jwt_result.get("jwt_analysis_results", {})
+            audit_results["total_vulnerabilities"] += len(jwt_data.get("vulnerabilities", []))
+
+    # 4. GraphQL Testing (if provided)
+    if graphql_endpoint:
+        logger.info("๐Ÿ” Phase 4: GraphQL security scanning")
+        graphql_result = graphql_scanner(graphql_endpoint)
+        if graphql_result.get("success"):
+            audit_results["tests_performed"].append("graphql_scanning")
+            audit_results["graphql_scanning"] = graphql_result
+
+            graphql_data = graphql_result.get("graphql_scan_results", {})
+            audit_results["total_vulnerabilities"] += len(graphql_data.get("vulnerabilities", []))
+
+    # Generate comprehensive recommendations
+    audit_results["recommendations"] = [
+        "Implement proper authentication and authorization",
+        "Use HTTPS for all API communications",
+        "Validate and sanitize all input parameters",
+        "Implement rate limiting and request throttling",
+        "Add comprehensive logging and monitoring",
+        "Regular security testing and code reviews",
+        "Keep API documentation updated and secure",
+        "Implement proper error handling"
+    ]
+
+    # Summary
+    audit_results["summary"] = {
+        "tests_performed": len(audit_results["tests_performed"]),
+        "total_vulnerabilities": audit_results["total_vulnerabilities"],
+        "audit_coverage": "comprehensive" if len(audit_results["tests_performed"]) >= 3 else "partial"
+    }
+
+    logger.info(f"โœ… Comprehensive API audit completed:")
+    logger.info(f" โ”œโ”€ Tests performed: {audit_results['summary']['tests_performed']}")
+    logger.info(f" โ”œโ”€ Total vulnerabilities: {audit_results['summary']['total_vulnerabilities']}")
+    logger.info(f" โ””โ”€ Coverage: {audit_results['summary']['audit_coverage']}")
+
+    return {
+        "success": True,
+        "comprehensive_audit": audit_results
+    }
+
+# ============================================================================
+# ADVANCED CTF TOOLS (v5.0 ENHANCEMENT)
+# ============================================================================
+
+@mcp.tool()
+def volatility3_analyze(memory_file: str, plugin: str, output_file: str = "", additional_args: str = "") -> Dict[str, Any]:
+    """
+    Execute Volatility3 for advanced memory forensics with enhanced logging.
+
+    Args:
+        memory_file: Path to memory dump file
+        plugin: Volatility3 plugin to execute
+        output_file: Output file path
+        additional_args: Additional Volatility3 arguments
+
+    Returns:
+        Advanced memory forensics results
+    """
+    data = {
+        "memory_file": memory_file,
+        "plugin": plugin,
+        "output_file": output_file,
+        "additional_args": additional_args
+    }
+    logger.info(f"๐Ÿง  Starting Volatility3 analysis: {plugin}")
+    result = hexstrike_client.safe_post("api/tools/volatility3", data)
+    if result.get("success"):
+        logger.info(f"โœ… Volatility3 analysis completed")
+    else:
+        logger.error(f"โŒ Volatility3 analysis failed")
+    return result
+
+@mcp.tool()
+def foremost_carving(input_file: str, output_dir: str = "/tmp/foremost_output", file_types: str = "", additional_args: str = "") -> Dict[str, Any]:
+    """
+    Execute Foremost for file carving with enhanced logging.
+
+    Args:
+        input_file: Input file or device to carve
+        output_dir: Output directory for carved files
+        file_types: File types to carve (jpg,gif,png,etc.)
+ additional_args: Additional Foremost arguments + + Returns: + File carving results + """ + data = { + "input_file": input_file, + "output_dir": output_dir, + "file_types": file_types, + "additional_args": additional_args + } + logger.info(f"๐Ÿ“ Starting Foremost file carving: {input_file}") + result = hexstrike_client.safe_post("api/tools/foremost", data) + if result.get("success"): + logger.info(f"โœ… Foremost carving completed") + else: + logger.error(f"โŒ Foremost carving failed") + return result + + @mcp.tool() + def steghide_analysis(action: str, cover_file: str, embed_file: str = "", passphrase: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Steghide for steganography analysis with enhanced logging. + + Args: + action: Action to perform (extract, embed, info) + cover_file: Cover file for steganography + embed_file: File to embed (for embed action) + passphrase: Passphrase for steganography + output_file: Output file path + additional_args: Additional Steghide arguments + + Returns: + Steganography analysis results + """ + data = { + "action": action, + "cover_file": cover_file, + "embed_file": embed_file, + "passphrase": passphrase, + "output_file": output_file, + "additional_args": additional_args + } + logger.info(f"๐Ÿ–ผ๏ธ Starting Steghide {action}: {cover_file}") + result = hexstrike_client.safe_post("api/tools/steghide", data) + if result.get("success"): + logger.info(f"โœ… Steghide {action} completed") + else: + logger.error(f"โŒ Steghide {action} failed") + return result + + @mcp.tool() + def exiftool_extract(file_path: str, output_format: str = "", tags: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute ExifTool for metadata extraction with enhanced logging. + + Args: + file_path: Path to file for metadata extraction + output_format: Output format (json, xml, csv) + tags: Specific tags to extract + additional_args: Additional ExifTool arguments + + Returns: + Metadata extraction results + """ + data = { + "file_path": file_path, + "output_format": output_format, + "tags": tags, + "additional_args": additional_args + } + logger.info(f"๐Ÿ“ท Starting ExifTool analysis: {file_path}") + result = hexstrike_client.safe_post("api/tools/exiftool", data) + if result.get("success"): + logger.info(f"โœ… ExifTool analysis completed") + else: + logger.error(f"โŒ ExifTool analysis failed") + return result + + @mcp.tool() + def hashpump_attack(signature: str, data: str, key_length: str, append_data: str, additional_args: str = "") -> Dict[str, Any]: + """ + Execute HashPump for hash length extension attacks with enhanced logging. 
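+
+    Example (illustrative call; all values are placeholders):
+        hashpump_attack(signature="<original_mac>", data="user=guest", key_length="16", append_data="&role=admin")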
+ + Args: + signature: Original hash signature + data: Original data + key_length: Length of secret key + append_data: Data to append + additional_args: Additional HashPump arguments + + Returns: + Hash length extension attack results + """ + data = { + "signature": signature, + "data": data, + "key_length": key_length, + "append_data": append_data, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting HashPump attack") + result = hexstrike_client.safe_post("api/tools/hashpump", data) + if result.get("success"): + logger.info(f"โœ… HashPump attack completed") + else: + logger.error(f"โŒ HashPump attack failed") + return result + + # ============================================================================ + # BUG BOUNTY RECONNAISSANCE TOOLS (v5.0 ENHANCEMENT) + # ============================================================================ + + @mcp.tool() + def hakrawler_crawl(url: str, depth: int = 2, forms: bool = True, robots: bool = True, sitemap: bool = True, wayback: bool = False, additional_args: str = "") -> Dict[str, Any]: + """ + Execute Hakrawler for web endpoint discovery with enhanced logging. + + Args: + url: Target URL to crawl + depth: Crawling depth + forms: Include forms in crawling + robots: Check robots.txt + sitemap: Check sitemap.xml + wayback: Use Wayback Machine + additional_args: Additional Hakrawler arguments + + Returns: + Web endpoint discovery results + """ + data = { + "url": url, + "depth": depth, + "forms": forms, + "robots": robots, + "sitemap": sitemap, + "wayback": wayback, + "additional_args": additional_args + } + logger.info(f"๐Ÿ•ท๏ธ Starting Hakrawler crawling: {url}") + result = hexstrike_client.safe_post("api/tools/hakrawler", data) + if result.get("success"): + logger.info(f"โœ… Hakrawler crawling completed") + else: + logger.error(f"โŒ Hakrawler crawling failed") + return result + + @mcp.tool() + def httpx_probe(targets: str = "", target_file: str = "", ports: str = "", methods: str = "GET", status_code: str = "", content_length: bool = False, output_file: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute HTTPx for HTTP probing with enhanced logging. + + Args: + targets: Target URLs or IPs + target_file: File containing targets + ports: Ports to probe + methods: HTTP methods to use + status_code: Filter by status code + content_length: Show content length + output_file: Output file path + additional_args: Additional HTTPx arguments + + Returns: + HTTP probing results + """ + data = { + "targets": targets, + "target_file": target_file, + "ports": ports, + "methods": methods, + "status_code": status_code, + "content_length": content_length, + "output_file": output_file, + "additional_args": additional_args + } + logger.info(f"๐ŸŒ Starting HTTPx probing") + result = hexstrike_client.safe_post("api/tools/httpx", data) + if result.get("success"): + logger.info(f"โœ… HTTPx probing completed") + else: + logger.error(f"โŒ HTTPx probing failed") + return result + + @mcp.tool() + def paramspider_discovery(domain: str, exclude: str = "", output_file: str = "", level: int = 2, additional_args: str = "") -> Dict[str, Any]: + """ + Execute ParamSpider for parameter discovery with enhanced logging. 
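+
+    Example (illustrative call; the domain is a placeholder):
+        paramspider_discovery(domain="target.example.com", level=2)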
+ + Args: + domain: Target domain + exclude: Extensions to exclude + output_file: Output file path + level: Crawling level + additional_args: Additional ParamSpider arguments + + Returns: + Parameter discovery results + """ + data = { + "domain": domain, + "exclude": exclude, + "output_file": output_file, + "level": level, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting ParamSpider discovery: {domain}") + result = hexstrike_client.safe_post("api/tools/paramspider", data) + if result.get("success"): + logger.info(f"โœ… ParamSpider discovery completed") + else: + logger.error(f"โŒ ParamSpider discovery failed") + return result + + # ============================================================================ + # ADVANCED WEB SECURITY TOOLS CONTINUED + # ============================================================================ + + @mcp.tool() + def burpsuite_scan(project_file: str = "", config_file: str = "", target: str = "", headless: bool = False, scan_type: str = "", scan_config: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Burp Suite with enhanced logging. + + Args: + project_file: Burp project file path + config_file: Burp configuration file path + target: Target URL + headless: Run in headless mode + scan_type: Type of scan to perform + scan_config: Scan configuration + output_file: Output file path + additional_args: Additional Burp Suite arguments + + Returns: + Burp Suite scan results + """ + data = { + "project_file": project_file, + "config_file": config_file, + "target": target, + "headless": headless, + "scan_type": scan_type, + "scan_config": scan_config, + "output_file": output_file, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Burp Suite scan") + result = hexstrike_client.safe_post("api/tools/burpsuite", data) + if result.get("success"): + logger.info(f"โœ… Burp Suite scan completed") + else: + logger.error(f"โŒ Burp Suite scan failed") + return result + + @mcp.tool() + def zap_scan(target: str = "", scan_type: str = "baseline", api_key: str = "", daemon: bool = False, port: str = "8090", host: str = "0.0.0.0", format_type: str = "xml", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute OWASP ZAP with enhanced logging. + + Args: + target: Target URL + scan_type: Type of scan (baseline, full, api) + api_key: ZAP API key + daemon: Run in daemon mode + port: Port for ZAP daemon + host: Host for ZAP daemon + format_type: Output format (xml, json, html) + output_file: Output file path + additional_args: Additional ZAP arguments + + Returns: + ZAP scan results + """ + data = { + "target": target, + "scan_type": scan_type, + "api_key": api_key, + "daemon": daemon, + "port": port, + "host": host, + "format": format_type, + "output_file": output_file, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting ZAP scan: {target}") + result = hexstrike_client.safe_post("api/tools/zap", data) + if result.get("success"): + logger.info(f"โœ… ZAP scan completed for {target}") + else: + logger.error(f"โŒ ZAP scan failed for {target}") + return result + + @mcp.tool() + def arjun_scan(url: str, method: str = "GET", data: str = "", headers: str = "", timeout: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute Arjun for parameter discovery with enhanced logging. + + Args: + url: Target URL + method: HTTP method (GET, POST, etc.) 
+ data: POST data for testing + headers: Custom headers + timeout: Request timeout + output_file: Output file path + additional_args: Additional Arjun arguments + + Returns: + Parameter discovery results + """ + data = { + "url": url, + "method": method, + "data": data, + "headers": headers, + "timeout": timeout, + "output_file": output_file, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Arjun parameter discovery: {url}") + result = hexstrike_client.safe_post("api/tools/arjun", data) + if result.get("success"): + logger.info(f"โœ… Arjun completed for {url}") + else: + logger.error(f"โŒ Arjun failed for {url}") + return result + + @mcp.tool() + def wafw00f_scan(target: str, additional_args: str = "") -> Dict[str, Any]: + """ + Execute wafw00f to identify and fingerprint WAF products with enhanced logging. + + Args: + target: Target URL or IP + additional_args: Additional wafw00f arguments + + Returns: + WAF detection results + """ + data = { + "target": target, + "additional_args": additional_args + } + logger.info(f"๐Ÿ›ก๏ธ Starting Wafw00f WAF detection: {target}") + result = hexstrike_client.safe_post("api/tools/wafw00f", data) + if result.get("success"): + logger.info(f"โœ… Wafw00f completed for {target}") + else: + logger.error(f"โŒ Wafw00f failed for {target}") + return result + + @mcp.tool() + def fierce_scan(domain: str, dns_server: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute fierce for DNS reconnaissance with enhanced logging. + + Args: + domain: Target domain + dns_server: DNS server to use + additional_args: Additional fierce arguments + + Returns: + DNS reconnaissance results + """ + data = { + "domain": domain, + "dns_server": dns_server, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting Fierce DNS recon: {domain}") + result = hexstrike_client.safe_post("api/tools/fierce", data) + if result.get("success"): + logger.info(f"โœ… Fierce completed for {domain}") + else: + logger.error(f"โŒ Fierce failed for {domain}") + return result + + @mcp.tool() + def dnsenum_scan(domain: str, dns_server: str = "", wordlist: str = "", additional_args: str = "") -> Dict[str, Any]: + """ + Execute dnsenum for DNS enumeration with enhanced logging. 
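+
+    Example (illustrative call; the domain and resolver are placeholders):
+        dnsenum_scan(domain="target.example.com", dns_server="8.8.8.8")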
+ + Args: + domain: Target domain + dns_server: DNS server to use + wordlist: Wordlist for brute forcing + additional_args: Additional dnsenum arguments + + Returns: + DNS enumeration results + """ + data = { + "domain": domain, + "dns_server": dns_server, + "wordlist": wordlist, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting DNSenum: {domain}") + result = hexstrike_client.safe_post("api/tools/dnsenum", data) + if result.get("success"): + logger.info(f"โœ… DNSenum completed for {domain}") + else: + logger.error(f"โŒ DNSenum failed for {domain}") + return result + + @mcp.tool() + def autorecon_scan( + target: str = "", + target_file: str = "", + ports: str = "", + output_dir: str = "", + max_scans: str = "", + max_port_scans: str = "", + heartbeat: str = "", + timeout: str = "", + target_timeout: str = "", + config_file: str = "", + global_file: str = "", + plugins_dir: str = "", + add_plugins_dir: str = "", + tags: str = "", + exclude_tags: str = "", + port_scans: str = "", + service_scans: str = "", + reports: str = "", + single_target: bool = False, + only_scans_dir: bool = False, + no_port_dirs: bool = False, + nmap: str = "", + nmap_append: str = "", + proxychains: bool = False, + disable_sanity_checks: bool = False, + disable_keyboard_control: bool = False, + force_services: str = "", + accessible: bool = False, + verbose: int = 0, + curl_path: str = "", + dirbuster_tool: str = "", + dirbuster_wordlist: str = "", + dirbuster_threads: str = "", + dirbuster_ext: str = "", + onesixtyone_community_strings: str = "", + global_username_wordlist: str = "", + global_password_wordlist: str = "", + global_domain: str = "", + additional_args: str = "" + ) -> Dict[str, Any]: + """ + Execute AutoRecon for comprehensive target enumeration with full parameter support. 
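+
+    Example (illustrative call; the target and output directory are placeholders):
+        autorecon_scan(target="10.10.10.10", ports="22,80,443", output_dir="/tmp/autorecon")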
+ + Args: + target: Single target to scan + target_file: File containing multiple targets + ports: Specific ports to scan + output_dir: Output directory + max_scans: Maximum number of concurrent scans + max_port_scans: Maximum number of concurrent port scans + heartbeat: Heartbeat interval + timeout: Global timeout + target_timeout: Per-target timeout + config_file: Configuration file path + global_file: Global configuration file + plugins_dir: Plugins directory + add_plugins_dir: Additional plugins directory + tags: Plugin tags to include + exclude_tags: Plugin tags to exclude + port_scans: Port scan plugins to run + service_scans: Service scan plugins to run + reports: Report plugins to run + single_target: Use single target directory structure + only_scans_dir: Only create scans directory + no_port_dirs: Don't create port directories + nmap: Custom nmap command + nmap_append: Arguments to append to nmap + proxychains: Use proxychains + disable_sanity_checks: Disable sanity checks + disable_keyboard_control: Disable keyboard control + force_services: Force service detection + accessible: Enable accessible output + verbose: Verbosity level (0-3) + curl_path: Custom curl path + dirbuster_tool: Directory busting tool + dirbuster_wordlist: Directory busting wordlist + dirbuster_threads: Directory busting threads + dirbuster_ext: Directory busting extensions + onesixtyone_community_strings: SNMP community strings + global_username_wordlist: Global username wordlist + global_password_wordlist: Global password wordlist + global_domain: Global domain + additional_args: Additional AutoRecon arguments + + Returns: + Comprehensive enumeration results with full configurability + """ + data = { + "target": target, + "target_file": target_file, + "ports": ports, + "output_dir": output_dir, + "max_scans": max_scans, + "max_port_scans": max_port_scans, + "heartbeat": heartbeat, + "timeout": timeout, + "target_timeout": target_timeout, + "config_file": config_file, + "global_file": global_file, + "plugins_dir": plugins_dir, + "add_plugins_dir": add_plugins_dir, + "tags": tags, + "exclude_tags": exclude_tags, + "port_scans": port_scans, + "service_scans": service_scans, + "reports": reports, + "single_target": single_target, + "only_scans_dir": only_scans_dir, + "no_port_dirs": no_port_dirs, + "nmap": nmap, + "nmap_append": nmap_append, + "proxychains": proxychains, + "disable_sanity_checks": disable_sanity_checks, + "disable_keyboard_control": disable_keyboard_control, + "force_services": force_services, + "accessible": accessible, + "verbose": verbose, + "curl_path": curl_path, + "dirbuster_tool": dirbuster_tool, + "dirbuster_wordlist": dirbuster_wordlist, + "dirbuster_threads": dirbuster_threads, + "dirbuster_ext": dirbuster_ext, + "onesixtyone_community_strings": onesixtyone_community_strings, + "global_username_wordlist": global_username_wordlist, + "global_password_wordlist": global_password_wordlist, + "global_domain": global_domain, + "additional_args": additional_args + } + logger.info(f"๐Ÿ” Starting AutoRecon comprehensive enumeration: {target}") + result = hexstrike_client.safe_post("api/tools/autorecon", data) + if result.get("success"): + logger.info(f"โœ… AutoRecon comprehensive enumeration completed for {target}") + else: + logger.error(f"โŒ AutoRecon failed for {target}") + return result + + # ============================================================================ + # SYSTEM MONITORING & TELEMETRY + # ============================================================================ + + 
@mcp.tool() + def server_health() -> Dict[str, Any]: + """ + Check the health status of the HexStrike AI server. + + Returns: + Server health information with tool availability and telemetry + """ + logger.info(f"๐Ÿฅ Checking HexStrike AI server health") + result = hexstrike_client.check_health() + if result.get("status") == "healthy": + logger.info(f"โœ… Server is healthy - {result.get('total_tools_available', 0)} tools available") + else: + logger.warning(f"โš ๏ธ Server health check returned: {result.get('status', 'unknown')}") + return result + + @mcp.tool() + def get_cache_stats() -> Dict[str, Any]: + """ + Get cache statistics from the HexStrike AI server. + + Returns: + Cache performance statistics + """ + logger.info(f"๐Ÿ’พ Getting cache statistics") + result = hexstrike_client.safe_get("api/cache/stats") + if "hit_rate" in result: + logger.info(f"๐Ÿ“Š Cache hit rate: {result.get('hit_rate', 'unknown')}") + return result + + @mcp.tool() + def clear_cache() -> Dict[str, Any]: + """ + Clear the cache on the HexStrike AI server. + + Returns: + Cache clear operation results + """ + logger.info(f"๐Ÿงน Clearing server cache") + result = hexstrike_client.safe_post("api/cache/clear", {}) + if result.get("success"): + logger.info(f"โœ… Cache cleared successfully") + else: + logger.error(f"โŒ Failed to clear cache") + return result + + @mcp.tool() + def get_telemetry() -> Dict[str, Any]: + """ + Get system telemetry from the HexStrike AI server. + + Returns: + System performance and usage telemetry + """ + logger.info(f"๐Ÿ“ˆ Getting system telemetry") + result = hexstrike_client.safe_get("api/telemetry") + if "commands_executed" in result: + logger.info(f"๐Ÿ“Š Commands executed: {result.get('commands_executed', 0)}") + return result + + # ============================================================================ + # PROCESS MANAGEMENT TOOLS (v5.0 ENHANCEMENT) + # ============================================================================ + + @mcp.tool() + def list_active_processes() -> Dict[str, Any]: + """ + List all active processes on the HexStrike AI server. + + Returns: + List of active processes with their status and progress + """ + logger.info("๐Ÿ“Š Listing active processes") + result = hexstrike_client.safe_get("api/processes/list") + if result.get("success"): + logger.info(f"โœ… Found {result.get('total_count', 0)} active processes") + else: + logger.error("โŒ Failed to list processes") + return result + + @mcp.tool() + def get_process_status(pid: int) -> Dict[str, Any]: + """ + Get the status of a specific process. + + Args: + pid: Process ID to check + + Returns: + Process status information including progress and runtime + """ + logger.info(f"๐Ÿ” Checking status of process {pid}") + result = hexstrike_client.safe_get(f"api/processes/status/{pid}") + if result.get("success"): + logger.info(f"โœ… Process {pid} status retrieved") + else: + logger.error(f"โŒ Process {pid} not found or error occurred") + return result + + @mcp.tool() + def terminate_process(pid: int) -> Dict[str, Any]: + """ + Terminate a specific running process. 
+ + Args: + pid: Process ID to terminate + + Returns: + Success status of the termination operation + """ + logger.info(f"๐Ÿ›‘ Terminating process {pid}") + result = hexstrike_client.safe_post(f"api/processes/terminate/{pid}", {}) + if result.get("success"): + logger.info(f"โœ… Process {pid} terminated successfully") + else: + logger.error(f"โŒ Failed to terminate process {pid}") + return result + + @mcp.tool() + def pause_process(pid: int) -> Dict[str, Any]: + """ + Pause a specific running process. + + Args: + pid: Process ID to pause + + Returns: + Success status of the pause operation + """ + logger.info(f"โธ๏ธ Pausing process {pid}") + result = hexstrike_client.safe_post(f"api/processes/pause/{pid}", {}) + if result.get("success"): + logger.info(f"โœ… Process {pid} paused successfully") + else: + logger.error(f"โŒ Failed to pause process {pid}") + return result + + @mcp.tool() + def resume_process(pid: int) -> Dict[str, Any]: + """ + Resume a paused process. + + Args: + pid: Process ID to resume + + Returns: + Success status of the resume operation + """ + logger.info(f"โ–ถ๏ธ Resuming process {pid}") + result = hexstrike_client.safe_post(f"api/processes/resume/{pid}", {}) + if result.get("success"): + logger.info(f"โœ… Process {pid} resumed successfully") + else: + logger.error(f"โŒ Failed to resume process {pid}") + return result + + @mcp.tool() + def get_process_dashboard() -> Dict[str, Any]: + """ + Get enhanced process dashboard with visual status indicators. + + Returns: + Real-time dashboard with progress bars, system metrics, and process status + """ + logger.info("๐Ÿ“Š Getting process dashboard") + result = hexstrike_client.safe_get("api/processes/dashboard") + if result.get("success", True) and "total_processes" in result: + total = result.get("total_processes", 0) + logger.info(f"โœ… Dashboard retrieved: {total} active processes") + + # Log visual summary for better UX + if total > 0: + logger.info("๐Ÿ“ˆ Active Processes Summary:") + for proc in result.get("processes", [])[:3]: # Show first 3 + logger.info(f" โ”œโ”€ PID {proc['pid']}: {proc['progress_bar']} {proc['progress_percent']}") + else: + logger.error("โŒ Failed to get process dashboard") + return result + + @mcp.tool() + def execute_command(command: str, use_cache: bool = True) -> Dict[str, Any]: + """ + Execute an arbitrary command on the HexStrike AI server with enhanced logging. 
+ + Args: + command: The command to execute + use_cache: Whether to use caching for this command + + Returns: + Command execution results with enhanced telemetry + """ + try: + logger.info(f"โšก Executing command: {command}") + result = hexstrike_client.execute_command(command, use_cache) + if "error" in result: + logger.error(f"โŒ Command failed: {result['error']}") + return { + "success": False, + "error": result["error"], + "stdout": "", + "stderr": f"Error executing command: {result['error']}" + } + + if result.get("success"): + execution_time = result.get("execution_time", 0) + logger.info(f"โœ… Command completed successfully in {execution_time:.2f}s") + else: + logger.warning(f"โš ๏ธ Command completed with errors") + + return result + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error executing command '{command}': {str(e)}") + return { + "success": False, + "error": str(e), + "stdout": "", + "stderr": f"Error executing command: {str(e)}" + } + + return mcp + +def parse_args(): + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Run the HexStrike AI MCP Client") + parser.add_argument("--server", type=str, default=DEFAULT_HEXSTRIKE_SERVER, + help=f"HexStrike AI API server URL (default: {DEFAULT_HEXSTRIKE_SERVER})") + parser.add_argument("--timeout", type=int, default=DEFAULT_REQUEST_TIMEOUT, + help=f"Request timeout in seconds (default: {DEFAULT_REQUEST_TIMEOUT})") + parser.add_argument("--debug", action="store_true", help="Enable debug logging") + return parser.parse_args() + +def main(): + """Main entry point for the MCP server.""" + args = parse_args() + + # Configure logging based on debug flag + if args.debug: + logger.setLevel(logging.DEBUG) + logger.debug("๐Ÿ” Debug logging enabled") + + # Print startup banner + banner = f""" +{Colors.RED}{Colors.BOLD} +๐Ÿ”ฅ HexStrike AI MCP Client v4.0 +๐Ÿค– Advanced AI-driven cybersecurity automation +๐Ÿ”— Connecting to: {args.server} +โšก Enhanced features: caching, telemetry, file ops, cloud security +{Colors.RESET} + """ + print(banner) + + try: + # Initialize the HexStrike AI client + hexstrike_client = HexStrikeClient(args.server, args.timeout) + + # Check server health and log the result + health = hexstrike_client.check_health() + if "error" in health: + logger.warning(f"โš ๏ธ Unable to connect to HexStrike AI API server at {args.server}: {health['error']}") + logger.warning("๐Ÿš€ MCP server will start, but tool execution may fail") + else: + logger.info(f"๐ŸŽฏ Successfully connected to HexStrike AI API server at {args.server}") + logger.info(f"๐Ÿฅ Server health status: {health['status']}") + logger.info(f"๐Ÿ“Š Version: {health.get('version', 'unknown')}") + if not health.get("all_essential_tools_available", False): + logger.warning("โš ๏ธ Not all essential tools are available on the HexStrike server") + missing_tools = [tool for tool, available in health.get("tools_status", {}).items() if not available] + if missing_tools: + logger.warning(f"โŒ Missing tools: {', '.join(missing_tools[:5])}{'...' 
if len(missing_tools) > 5 else ''}") + + # Set up and run the MCP server + mcp = setup_mcp_server(hexstrike_client) + logger.info("๐Ÿš€ Starting HexStrike AI MCP server") + logger.info("๐Ÿค– Ready to serve AI agents with enhanced cybersecurity capabilities") + mcp.run() + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error starting MCP server: {str(e)}") + import traceback + logger.error(traceback.format_exc()) + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/hexstrike_server.py b/hexstrike_server.py new file mode 100644 index 0000000..b7c68bc --- /dev/null +++ b/hexstrike_server.py @@ -0,0 +1,3918 @@ +#!/usr/bin/env python3 +import argparse +import json +import logging +import os +import subprocess +import sys +import traceback +import threading +import time +import hashlib +import pickle +from datetime import datetime, timedelta +from typing import Dict, Any, Optional +from collections import OrderedDict +import shutil +import venv +import zipfile +from pathlib import Path +from flask import Flask, request, jsonify +import psutil +import signal + +# ============================================================================ +# PROCESS MANAGEMENT FOR COMMAND TERMINATION (v5.0 ENHANCEMENT) +# ============================================================================ + +# Process management for command termination +active_processes = {} # pid -> process info +process_lock = threading.Lock() + +class ProcessManager: + """Enhanced process manager for command termination and monitoring""" + + @staticmethod + def register_process(pid, command, process_obj): + """Register a new active process""" + with process_lock: + active_processes[pid] = { + "pid": pid, + "command": command, + "process": process_obj, + "start_time": time.time(), + "status": "running", + "progress": 0.0, + "last_output": "", + "bytes_processed": 0 + } + logger.info(f"๐Ÿ†” REGISTERED: Process {pid} - {command[:50]}...") + + @staticmethod + def update_process_progress(pid, progress, last_output="", bytes_processed=0): + """Update process progress and stats""" + with process_lock: + if pid in active_processes: + active_processes[pid]["progress"] = progress + active_processes[pid]["last_output"] = last_output + active_processes[pid]["bytes_processed"] = bytes_processed + runtime = time.time() - active_processes[pid]["start_time"] + + # Calculate ETA if progress > 0 + eta = 0 + if progress > 0: + eta = (runtime / progress) * (1.0 - progress) + + active_processes[pid]["runtime"] = runtime + active_processes[pid]["eta"] = eta + + @staticmethod + def terminate_process(pid): + """Terminate a specific process""" + with process_lock: + if pid in active_processes: + process_info = active_processes[pid] + try: + process_obj = process_info["process"] + if process_obj and process_obj.poll() is None: + process_obj.terminate() + time.sleep(1) # Give it a chance to terminate gracefully + if process_obj.poll() is None: + process_obj.kill() # Force kill if still running + + active_processes[pid]["status"] = "terminated" + logger.warning(f"๐Ÿ›‘ TERMINATED: Process {pid} - {process_info['command'][:50]}...") + return True + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error terminating process {pid}: {str(e)}") + return False + return False + + @staticmethod + def cleanup_process(pid): + """Remove process from active registry""" + with process_lock: + if pid in active_processes: + process_info = active_processes.pop(pid) + logger.info(f"๐Ÿงน CLEANUP: Process {pid} removed from registry") + return 
process_info + return None + + @staticmethod + def get_process_status(pid): + """Get status of a specific process""" + with process_lock: + return active_processes.get(pid, None) + + @staticmethod + def list_active_processes(): + """List all active processes""" + with process_lock: + return dict(active_processes) + + @staticmethod + def pause_process(pid): + """Pause a specific process (SIGSTOP)""" + with process_lock: + if pid in active_processes: + try: + process_obj = active_processes[pid]["process"] + if process_obj and process_obj.poll() is None: + os.kill(pid, signal.SIGSTOP) + active_processes[pid]["status"] = "paused" + logger.info(f"โธ๏ธ PAUSED: Process {pid}") + return True + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error pausing process {pid}: {str(e)}") + return False + + @staticmethod + def resume_process(pid): + """Resume a paused process (SIGCONT)""" + with process_lock: + if pid in active_processes: + try: + process_obj = active_processes[pid]["process"] + if process_obj and process_obj.poll() is None: + os.kill(pid, signal.SIGCONT) + active_processes[pid]["status"] = "running" + logger.info(f"โ–ถ๏ธ RESUMED: Process {pid}") + return True + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error resuming process {pid}: {str(e)}") + return False + +# HexStrike AI Banner +BANNER = r""" +โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•— +โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ•โ•โ•โ•โ•šโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•”โ•โ•โ•โ•โ•โ•šโ•โ•โ–ˆโ–ˆโ•”โ•โ•โ•โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•”โ•โ•โ•โ•โ• โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘ +โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ•šโ–ˆโ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ +โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ•โ• โ–ˆโ–ˆโ•”โ–ˆโ–ˆโ•— โ•šโ•โ•โ•โ•โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•”โ•โ•โ• โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ +โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ +โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ•โ•โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ•โ• โ•šโ•โ• โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ•โ• โ•šโ•โ• โ•šโ•โ•โ•šโ•โ• + + HexStrike AI - Advanced Cybersecurity Automation Platform + Developed by: 0x4m4 (www.0x4m4.com) (www.hexstrike.com) + Version: 5.0.0 Agents + + ๐Ÿš€ Powerful AI-driven API Server for offensive security tools + ๐Ÿ›ก๏ธ Advanced caching, real-time telemetry, and enhanced logging + โšก Cloud security, file operations, and environment management + ๐ŸŽฏ Process management, CTF tools, and bug bounty arsenal + +""" + +# Color codes for terminal output +class Colors: + RED = '\033[91m' + GREEN = '\033[92m' + YELLOW = '\033[93m' + BLUE = '\033[94m' + MAGENTA = '\033[95m' + CYAN = '\033[96m' + WHITE = '\033[97m' + BOLD = '\033[1m' + RESET = '\033[0m' + +# Configure enhanced logging with colors +class ColoredFormatter(logging.Formatter): + """Custom formatter with colors and emojis""" + + COLORS = { + 'DEBUG': Colors.CYAN, + 'INFO': Colors.GREEN, + 'WARNING': Colors.YELLOW, + 'ERROR': Colors.RED, + 'CRITICAL': Colors.MAGENTA + Colors.BOLD + } + + EMOJIS = { + 
'DEBUG': '๐Ÿ”', + 'INFO': 'โœ…', + 'WARNING': 'โš ๏ธ', + 'ERROR': 'โŒ', + 'CRITICAL': '๐Ÿ”ฅ' + } + + def format(self, record): + emoji = self.EMOJIS.get(record.levelname, '๐Ÿ“') + color = self.COLORS.get(record.levelname, Colors.WHITE) + + # Add color and emoji to the message + record.msg = f"{color}{emoji} {record.msg}{Colors.RESET}" + return super().format(record) + +# Enhanced logging setup +def setup_logging(): + """Setup enhanced logging with colors and formatting""" + logger = logging.getLogger() + logger.setLevel(logging.INFO) + + # Clear existing handlers + for handler in logger.handlers[:]: + logger.removeHandler(handler) + + # Console handler with colors + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setFormatter(ColoredFormatter( + "[๐Ÿ”ฅ HexStrike AI] %(asctime)s [%(levelname)s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S" + )) + logger.addHandler(console_handler) + + return logger + +logger = setup_logging() + +# Configuration +API_PORT = int(os.environ.get("API_PORT", 5000)) +DEBUG_MODE = os.environ.get("DEBUG_MODE", "0").lower() in ("1", "true", "yes", "y") +COMMAND_TIMEOUT = 300 # 5 minutes default timeout +CACHE_SIZE = 1000 +CACHE_TTL = 3600 # 1 hour + +app = Flask(__name__) + +class HexStrikeCache: + """Advanced caching system for command results""" + + def __init__(self, max_size: int = CACHE_SIZE, ttl: int = CACHE_TTL): + self.cache = OrderedDict() + self.max_size = max_size + self.ttl = ttl + self.stats = {"hits": 0, "misses": 0, "evictions": 0} + + def _generate_key(self, command: str, params: Dict[str, Any]) -> str: + """Generate cache key from command and parameters""" + key_data = f"{command}:{json.dumps(params, sort_keys=True)}" + return hashlib.md5(key_data.encode()).hexdigest() + + def _is_expired(self, timestamp: float) -> bool: + """Check if cache entry is expired""" + return time.time() - timestamp > self.ttl + + def get(self, command: str, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Get cached result if available and not expired""" + key = self._generate_key(command, params) + + if key in self.cache: + timestamp, data = self.cache[key] + if not self._is_expired(timestamp): + # Move to end (most recently used) + self.cache.move_to_end(key) + self.stats["hits"] += 1 + logger.info(f"๐Ÿ’พ Cache HIT for command: {command}") + return data + else: + # Remove expired entry + del self.cache[key] + + self.stats["misses"] += 1 + logger.info(f"๐Ÿ” Cache MISS for command: {command}") + return None + + def set(self, command: str, params: Dict[str, Any], result: Dict[str, Any]): + """Store result in cache""" + key = self._generate_key(command, params) + + # Remove oldest entries if cache is full + while len(self.cache) >= self.max_size: + oldest_key = next(iter(self.cache)) + del self.cache[oldest_key] + self.stats["evictions"] += 1 + + self.cache[key] = (time.time(), result) + logger.info(f"๐Ÿ’พ Cached result for command: {command}") + + def get_stats(self) -> Dict[str, Any]: + """Get cache statistics""" + total_requests = self.stats["hits"] + self.stats["misses"] + hit_rate = (self.stats["hits"] / total_requests * 100) if total_requests > 0 else 0 + + return { + "size": len(self.cache), + "max_size": self.max_size, + "hit_rate": f"{hit_rate:.1f}%", + "hits": self.stats["hits"], + "misses": self.stats["misses"], + "evictions": self.stats["evictions"] + } + +# Global cache instance +cache = HexStrikeCache() + +class TelemetryCollector: + """Collect and manage system telemetry""" + + def __init__(self): + self.stats = { + 
"commands_executed": 0, + "successful_commands": 0, + "failed_commands": 0, + "total_execution_time": 0.0, + "start_time": time.time() + } + + def record_execution(self, success: bool, execution_time: float): + """Record command execution statistics""" + self.stats["commands_executed"] += 1 + if success: + self.stats["successful_commands"] += 1 + else: + self.stats["failed_commands"] += 1 + self.stats["total_execution_time"] += execution_time + + def get_system_metrics(self) -> Dict[str, Any]: + """Get current system metrics""" + return { + "cpu_percent": psutil.cpu_percent(interval=1), + "memory_percent": psutil.virtual_memory().percent, + "disk_usage": psutil.disk_usage('/').percent, + "network_io": psutil.net_io_counters()._asdict() if psutil.net_io_counters() else {} + } + + def get_stats(self) -> Dict[str, Any]: + """Get telemetry statistics""" + uptime = time.time() - self.stats["start_time"] + success_rate = (self.stats["successful_commands"] / self.stats["commands_executed"] * 100) if self.stats["commands_executed"] > 0 else 0 + avg_execution_time = (self.stats["total_execution_time"] / self.stats["commands_executed"]) if self.stats["commands_executed"] > 0 else 0 + + return { + "uptime_seconds": uptime, + "commands_executed": self.stats["commands_executed"], + "success_rate": f"{success_rate:.1f}%", + "average_execution_time": f"{avg_execution_time:.2f}s", + "system_metrics": self.get_system_metrics() + } + +# Global telemetry collector +telemetry = TelemetryCollector() + +class EnhancedCommandExecutor: + """Enhanced command executor with caching, progress tracking, and better output handling""" + + def __init__(self, command: str, timeout: int = COMMAND_TIMEOUT): + self.command = command + self.timeout = timeout + self.process = None + self.stdout_data = "" + self.stderr_data = "" + self.stdout_thread = None + self.stderr_thread = None + self.return_code = None + self.timed_out = False + self.start_time = None + self.end_time = None + + def _read_stdout(self): + """Thread function to continuously read and display stdout""" + try: + for line in iter(self.process.stdout.readline, ''): + if line: + self.stdout_data += line + # Real-time output display + logger.info(f"๐Ÿ“ค STDOUT: {line.strip()}") + except Exception as e: + logger.error(f"Error reading stdout: {e}") + + def _read_stderr(self): + """Thread function to continuously read and display stderr""" + try: + for line in iter(self.process.stderr.readline, ''): + if line: + self.stderr_data += line + # Real-time error output display + logger.warning(f"๐Ÿ“ฅ STDERR: {line.strip()}") + except Exception as e: + logger.error(f"Error reading stderr: {e}") + + def _show_progress(self, duration: float): + """Show enhanced progress indication for long-running commands""" + if duration > 2: # Show progress for commands taking more than 2 seconds + progress_chars = ['โฃพ', 'โฃฝ', 'โฃป', 'โขฟ', 'โกฟ', 'โฃŸ', 'โฃฏ', 'โฃท'] + start = time.time() + i = 0 + while self.process and self.process.poll() is None: + elapsed = time.time() - start + char = progress_chars[i % len(progress_chars)] + + # Calculate progress percentage (rough estimate) + progress_percent = min((elapsed / self.timeout) * 100, 99.9) + + # Create progress bar + bar_length = 20 + filled_length = int(bar_length * progress_percent / 100) + bar = 'โ–ˆ' * filled_length + 'โ–‘' * (bar_length - filled_length) + + # Calculate ETA + if progress_percent > 5: # Only show ETA after 5% progress + eta = ((elapsed / progress_percent) * 100) - elapsed + eta_str = f" | ETA: {eta:.0f}s" + else: 
+ eta_str = " | ETA: Calculating..." + + # Update process manager with progress + ProcessManager.update_process_progress( + self.process.pid, + progress_percent / 100, + f"Running for {elapsed:.1f}s", + len(self.stdout_data) + len(self.stderr_data) + ) + + logger.info(f"โšก PROGRESS {char} [{bar}] {progress_percent:.1f}% | {elapsed:.1f}s{eta_str} | PID: {self.process.pid}") + time.sleep(0.8) + i += 1 + if elapsed > self.timeout: + break + + def execute(self) -> Dict[str, Any]: + """Execute the command with enhanced monitoring and output""" + self.start_time = time.time() + + logger.info(f"๐Ÿš€ EXECUTING: {self.command}") + logger.info(f"โฑ๏ธ TIMEOUT: {self.timeout}s | PID: Starting...") + + try: + self.process = subprocess.Popen( + self.command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + bufsize=1 + ) + + pid = self.process.pid + logger.info(f"๐Ÿ†” PROCESS: PID {pid} started") + + # Register process with ProcessManager (v5.0 enhancement) + ProcessManager.register_process(pid, self.command, self.process) + + # Start threads to read output continuously + self.stdout_thread = threading.Thread(target=self._read_stdout) + self.stderr_thread = threading.Thread(target=self._read_stderr) + self.stdout_thread.daemon = True + self.stderr_thread.daemon = True + self.stdout_thread.start() + self.stderr_thread.start() + + # Start progress tracking in a separate thread + progress_thread = threading.Thread(target=self._show_progress, args=(self.timeout,)) + progress_thread.daemon = True + progress_thread.start() + + # Wait for the process to complete or timeout + try: + self.return_code = self.process.wait(timeout=self.timeout) + self.end_time = time.time() + + # Process completed, join the threads + self.stdout_thread.join(timeout=1) + self.stderr_thread.join(timeout=1) + + execution_time = self.end_time - self.start_time + + # Cleanup process from registry (v5.0 enhancement) + ProcessManager.cleanup_process(pid) + + if self.return_code == 0: + logger.info(f"โœ… SUCCESS: Command completed | Exit Code: {self.return_code} | Duration: {execution_time:.2f}s") + telemetry.record_execution(True, execution_time) + else: + logger.warning(f"โš ๏ธ WARNING: Command completed with errors | Exit Code: {self.return_code} | Duration: {execution_time:.2f}s") + telemetry.record_execution(False, execution_time) + + except subprocess.TimeoutExpired: + self.end_time = time.time() + execution_time = self.end_time - self.start_time + + # Process timed out but we might have partial results + self.timed_out = True + logger.warning(f"โฐ TIMEOUT: Command timed out after {self.timeout}s | Terminating PID {self.process.pid}") + + # Try to terminate gracefully first + self.process.terminate() + try: + self.process.wait(timeout=5) + except subprocess.TimeoutExpired: + # Force kill if it doesn't terminate + logger.error(f"๐Ÿ”ช FORCE KILL: Process {self.process.pid} not responding to termination") + self.process.kill() + + self.return_code = -1 + telemetry.record_execution(False, execution_time) + + # Always consider it a success if we have output, even with timeout + success = True if self.timed_out and (self.stdout_data or self.stderr_data) else (self.return_code == 0) + + # Log enhanced final results with summary + output_size = len(self.stdout_data) + len(self.stderr_data) + execution_time = self.end_time - self.start_time if self.end_time else 0 + + # Create status summary + status_icon = "โœ…" if success else "โŒ" + timeout_status = " [TIMEOUT]" if self.timed_out else "" + + 
logger.info(f"๐Ÿ“Š FINAL RESULTS {status_icon}") + logger.info(f" โ”œโ”€ Command: {self.command[:60]}{'...' if len(self.command) > 60 else ''}") + logger.info(f" โ”œโ”€ Duration: {execution_time:.2f}s{timeout_status}") + logger.info(f" โ”œโ”€ Output Size: {output_size} bytes") + logger.info(f" โ”œโ”€ Exit Code: {self.return_code}") + logger.info(f" โ””โ”€ Status: {'SUCCESS' if success else 'FAILED'} | Cached: Yes") + + return { + "stdout": self.stdout_data, + "stderr": self.stderr_data, + "return_code": self.return_code, + "success": success, + "timed_out": self.timed_out, + "partial_results": self.timed_out and (self.stdout_data or self.stderr_data), + "execution_time": self.end_time - self.start_time if self.end_time else 0, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + self.end_time = time.time() + execution_time = self.end_time - self.start_time if self.start_time else 0 + + logger.error(f"๐Ÿ’ฅ ERROR: Command execution failed: {str(e)}") + logger.error(f"๐Ÿ” TRACEBACK: {traceback.format_exc()}") + telemetry.record_execution(False, execution_time) + + return { + "stdout": self.stdout_data, + "stderr": f"Error executing command: {str(e)}\n{self.stderr_data}", + "return_code": -1, + "success": False, + "timed_out": False, + "partial_results": bool(self.stdout_data or self.stderr_data), + "execution_time": execution_time, + "timestamp": datetime.now().isoformat() + } + +class PythonEnvironmentManager: + """Manage Python virtual environments and dependencies""" + + def __init__(self, base_dir: str = "/tmp/hexstrike_envs"): + self.base_dir = Path(base_dir) + self.base_dir.mkdir(exist_ok=True) + + def create_venv(self, env_name: str) -> Path: + """Create a new virtual environment""" + env_path = self.base_dir / env_name + if not env_path.exists(): + logger.info(f"๐Ÿ Creating virtual environment: {env_name}") + venv.create(env_path, with_pip=True) + return env_path + + def install_package(self, env_name: str, package: str) -> bool: + """Install a package in the specified environment""" + env_path = self.create_venv(env_name) + pip_path = env_path / "bin" / "pip" + + try: + result = subprocess.run([str(pip_path), "install", package], + capture_output=True, text=True, timeout=300) + if result.returncode == 0: + logger.info(f"๐Ÿ“ฆ Installed package {package} in {env_name}") + return True + else: + logger.error(f"โŒ Failed to install {package}: {result.stderr}") + return False + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error installing package {package}: {e}") + return False + + def get_python_path(self, env_name: str) -> str: + """Get Python executable path for environment""" + env_path = self.create_venv(env_name) + return str(env_path / "bin" / "python") + +# Global environment manager +env_manager = PythonEnvironmentManager() + +def execute_command(command: str, use_cache: bool = True) -> Dict[str, Any]: + """ + Execute a shell command with enhanced features + + Args: + command: The command to execute + use_cache: Whether to use caching for this command + + Returns: + A dictionary containing the stdout, stderr, return code, and metadata + """ + + # Check cache first + if use_cache: + cached_result = cache.get(command, {}) + if cached_result: + return cached_result + + # Execute command + executor = EnhancedCommandExecutor(command) + result = executor.execute() + + # Cache successful results + if use_cache and result.get("success", False): + cache.set(command, {}, result) + + return result + +# File Operations Manager +class FileOperationsManager: + 
"""Handle file operations with security and validation""" + + def __init__(self, base_dir: str = "/tmp/hexstrike_files"): + self.base_dir = Path(base_dir) + self.base_dir.mkdir(exist_ok=True) + self.max_file_size = 100 * 1024 * 1024 # 100MB + + def create_file(self, filename: str, content: str, binary: bool = False) -> Dict[str, Any]: + """Create a file with the specified content""" + try: + file_path = self.base_dir / filename + file_path.parent.mkdir(parents=True, exist_ok=True) + + if len(content.encode()) > self.max_file_size: + return {"success": False, "error": f"File size exceeds {self.max_file_size} bytes"} + + mode = "wb" if binary else "w" + with open(file_path, mode) as f: + if binary: + f.write(content.encode() if isinstance(content, str) else content) + else: + f.write(content) + + logger.info(f"๐Ÿ“„ Created file: {filename} ({len(content)} bytes)") + return {"success": True, "path": str(file_path), "size": len(content)} + + except Exception as e: + logger.error(f"โŒ Error creating file {filename}: {e}") + return {"success": False, "error": str(e)} + + def modify_file(self, filename: str, content: str, append: bool = False) -> Dict[str, Any]: + """Modify an existing file""" + try: + file_path = self.base_dir / filename + if not file_path.exists(): + return {"success": False, "error": "File does not exist"} + + mode = "a" if append else "w" + with open(file_path, mode) as f: + f.write(content) + + logger.info(f"โœ๏ธ Modified file: {filename}") + return {"success": True, "path": str(file_path)} + + except Exception as e: + logger.error(f"โŒ Error modifying file {filename}: {e}") + return {"success": False, "error": str(e)} + + def delete_file(self, filename: str) -> Dict[str, Any]: + """Delete a file or directory""" + try: + file_path = self.base_dir / filename + if not file_path.exists(): + return {"success": False, "error": "File does not exist"} + + if file_path.is_dir(): + shutil.rmtree(file_path) + else: + file_path.unlink() + + logger.info(f"๐Ÿ—‘๏ธ Deleted: {filename}") + return {"success": True} + + except Exception as e: + logger.error(f"โŒ Error deleting {filename}: {e}") + return {"success": False, "error": str(e)} + + def list_files(self, directory: str = ".") -> Dict[str, Any]: + """List files in a directory""" + try: + dir_path = self.base_dir / directory + if not dir_path.exists(): + return {"success": False, "error": "Directory does not exist"} + + files = [] + for item in dir_path.iterdir(): + files.append({ + "name": item.name, + "type": "directory" if item.is_dir() else "file", + "size": item.stat().st_size if item.is_file() else 0, + "modified": datetime.fromtimestamp(item.stat().st_mtime).isoformat() + }) + + return {"success": True, "files": files} + + except Exception as e: + logger.error(f"โŒ Error listing files in {directory}: {e}") + return {"success": False, "error": str(e)} + +# Global file operations manager +file_manager = FileOperationsManager() + +# API Routes + +@app.route("/health", methods=["GET"]) +def health_check(): + """Enhanced health check endpoint with telemetry""" + essential_tools = ["nmap", "gobuster", "dirb", "nikto", "sqlmap", "hydra", "john"] + cloud_tools = ["prowler", "scout2", "trivy", "kube-hunter", "cloudsploit"] + advanced_tools = [ + "ffuf", "nuclei", "nxc", "amass", "hashcat", "subfinder", + "smbmap", "volatility", "msfvenom", "msfconsole", "enum4linux", "wpscan", + "burpsuite", "zaproxy" + ] + + all_tools = essential_tools + cloud_tools + advanced_tools + tools_status = {} + + for tool in all_tools: + try: + result = 
execute_command(f"which {tool}", use_cache=True) + tools_status[tool] = result["success"] + except: + tools_status[tool] = False + + all_essential_tools_available = all(tools_status[tool] for tool in essential_tools) + + return jsonify({ + "status": "healthy", + "message": "HexStrike AI Tools API Server is operational", + "version": "5.0.0", + "tools_status": tools_status, + "all_essential_tools_available": all_essential_tools_available, + "total_tools_available": sum(1 for tool, available in tools_status.items() if available), + "total_tools_count": len(all_tools), + "cache_stats": cache.get_stats(), + "telemetry": telemetry.get_stats(), + "uptime": time.time() - telemetry.stats["start_time"] + }) + +@app.route("/api/command", methods=["POST"]) +def generic_command(): + """Execute any command provided in the request with enhanced logging""" + try: + params = request.json + command = params.get("command", "") + use_cache = params.get("use_cache", True) + + if not command: + logger.warning("โš ๏ธ Command endpoint called without command parameter") + return jsonify({ + "error": "Command parameter is required" + }), 400 + + result = execute_command(command, use_cache=use_cache) + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in command endpoint: {str(e)}") + logger.error(traceback.format_exc()) + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# File Operations API Endpoints + +@app.route("/api/files/create", methods=["POST"]) +def create_file(): + """Create a new file""" + try: + params = request.json + filename = params.get("filename", "") + content = params.get("content", "") + binary = params.get("binary", False) + + if not filename: + return jsonify({"error": "Filename is required"}), 400 + + result = file_manager.create_file(filename, content, binary) + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error creating file: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +@app.route("/api/files/modify", methods=["POST"]) +def modify_file(): + """Modify an existing file""" + try: + params = request.json + filename = params.get("filename", "") + content = params.get("content", "") + append = params.get("append", False) + + if not filename: + return jsonify({"error": "Filename is required"}), 400 + + result = file_manager.modify_file(filename, content, append) + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error modifying file: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +@app.route("/api/files/delete", methods=["DELETE"]) +def delete_file(): + """Delete a file or directory""" + try: + params = request.json + filename = params.get("filename", "") + + if not filename: + return jsonify({"error": "Filename is required"}), 400 + + result = file_manager.delete_file(filename) + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error deleting file: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +@app.route("/api/files/list", methods=["GET"]) +def list_files(): + """List files in a directory""" + try: + directory = request.args.get("directory", ".") + result = file_manager.list_files(directory) + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error listing files: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +# Payload Generation Endpoint +@app.route("/api/payloads/generate", methods=["POST"]) +def generate_payload(): + """Generate large payloads for 
testing""" + try: + params = request.json + payload_type = params.get("type", "buffer") + size = params.get("size", 1024) + pattern = params.get("pattern", "A") + filename = params.get("filename", f"payload_{int(time.time())}") + + if size > 100 * 1024 * 1024: # 100MB limit + return jsonify({"error": "Payload size too large (max 100MB)"}), 400 + + if payload_type == "buffer": + content = pattern * (size // len(pattern)) + elif payload_type == "cyclic": + # Generate cyclic pattern + alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + content = "" + for i in range(size): + content += alphabet[i % len(alphabet)] + elif payload_type == "random": + import random + import string + content = ''.join(random.choices(string.ascii_letters + string.digits, k=size)) + else: + return jsonify({"error": "Invalid payload type"}), 400 + + result = file_manager.create_file(filename, content) + result["payload_info"] = { + "type": payload_type, + "size": size, + "pattern": pattern + } + + logger.info(f"๐ŸŽฏ Generated {payload_type} payload: {filename} ({size} bytes)") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error generating payload: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +# Cache Management Endpoint +@app.route("/api/cache/stats", methods=["GET"]) +def cache_stats(): + """Get cache statistics""" + return jsonify(cache.get_stats()) + +@app.route("/api/cache/clear", methods=["POST"]) +def clear_cache(): + """Clear the cache""" + cache.cache.clear() + cache.stats = {"hits": 0, "misses": 0, "evictions": 0} + logger.info("๐Ÿงน Cache cleared") + return jsonify({"success": True, "message": "Cache cleared"}) + +# Telemetry Endpoint +@app.route("/api/telemetry", methods=["GET"]) +def get_telemetry(): + """Get system telemetry""" + return jsonify(telemetry.get_stats()) + +# ============================================================================ +# PROCESS MANAGEMENT API ENDPOINTS (v5.0 ENHANCEMENT) +# ============================================================================ + +@app.route("/api/processes/list", methods=["GET"]) +def list_processes(): + """List all active processes""" + try: + processes = ProcessManager.list_active_processes() + + # Add calculated fields for each process + for pid, info in processes.items(): + runtime = time.time() - info["start_time"] + info["runtime_formatted"] = f"{runtime:.1f}s" + + if info["progress"] > 0: + eta = (runtime / info["progress"]) * (1.0 - info["progress"]) + info["eta_formatted"] = f"{eta:.1f}s" + else: + info["eta_formatted"] = "Unknown" + + return jsonify({ + "success": True, + "active_processes": processes, + "total_count": len(processes) + }) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error listing processes: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +@app.route("/api/processes/status/", methods=["GET"]) +def get_process_status(pid): + """Get status of a specific process""" + try: + process_info = ProcessManager.get_process_status(pid) + + if process_info: + # Add calculated fields + runtime = time.time() - process_info["start_time"] + process_info["runtime_formatted"] = f"{runtime:.1f}s" + + if process_info["progress"] > 0: + eta = (runtime / process_info["progress"]) * (1.0 - process_info["progress"]) + process_info["eta_formatted"] = f"{eta:.1f}s" + else: + process_info["eta_formatted"] = "Unknown" + + return jsonify({ + "success": True, + "process": process_info + }) + else: + return jsonify({ + "success": False, + "error": f"Process {pid} not found" + }), 404 + + 
+    except Exception as e:
+        logger.error(f"๐Ÿ’ฅ Error getting process status: {str(e)}")
+        return jsonify({"error": f"Server error: {str(e)}"}), 500
+
+@app.route("/api/processes/terminate/<int:pid>", methods=["POST"])
+def terminate_process(pid):
+    """Terminate a specific process"""
+    try:
+        success = ProcessManager.terminate_process(pid)
+
+        if success:
+            logger.info(f"๐Ÿ›‘ Process {pid} terminated successfully")
+            return jsonify({
+                "success": True,
+                "message": f"Process {pid} terminated successfully"
+            })
+        else:
+            return jsonify({
+                "success": False,
+                "error": f"Failed to terminate process {pid} or process not found"
+            }), 404
+
+    except Exception as e:
+        logger.error(f"๐Ÿ’ฅ Error terminating process {pid}: {str(e)}")
+        return jsonify({"error": f"Server error: {str(e)}"}), 500
+
+@app.route("/api/processes/pause/<int:pid>", methods=["POST"])
+def pause_process(pid):
+    """Pause a specific process"""
+    try:
+        success = ProcessManager.pause_process(pid)
+
+        if success:
+            logger.info(f"โธ๏ธ Process {pid} paused successfully")
+            return jsonify({
+                "success": True,
+                "message": f"Process {pid} paused successfully"
+            })
+        else:
+            return jsonify({
+                "success": False,
+                "error": f"Failed to pause process {pid} or process not found"
+            }), 404
+
+    except Exception as e:
+        logger.error(f"๐Ÿ’ฅ Error pausing process {pid}: {str(e)}")
+        return jsonify({"error": f"Server error: {str(e)}"}), 500
+
+@app.route("/api/processes/resume/<int:pid>", methods=["POST"])
+def resume_process(pid):
+    """Resume a paused process"""
+    try:
+        success = ProcessManager.resume_process(pid)
+
+        if success:
+            logger.info(f"โ–ถ๏ธ Process {pid} resumed successfully")
+            return jsonify({
+                "success": True,
+                "message": f"Process {pid} resumed successfully"
+            })
+        else:
+            return jsonify({
+                "success": False,
+                "error": f"Failed to resume process {pid} or process not found"
+            }), 404
+
+    except Exception as e:
+        logger.error(f"๐Ÿ’ฅ Error resuming process {pid}: {str(e)}")
+        return jsonify({"error": f"Server error: {str(e)}"}), 500
+
+@app.route("/api/processes/dashboard", methods=["GET"])
+def process_dashboard():
+    """Get enhanced process dashboard with visual status"""
+    try:
+        processes = ProcessManager.list_active_processes()
+        current_time = time.time()
+
+        dashboard = {
+            "timestamp": datetime.now().isoformat(),
+            "total_processes": len(processes),
+            "processes": [],
+            "system_load": {
+                "cpu_percent": psutil.cpu_percent(interval=1),
+                "memory_percent": psutil.virtual_memory().percent,
+                "active_connections": len(psutil.net_connections())
+            }
+        }
+
+        for pid, info in processes.items():
+            runtime = current_time - info["start_time"]
+            progress_percent = info.get("progress", 0) * 100
+
+            # Create visual progress bar
+            bar_length = 20
+            filled_length = int(bar_length * progress_percent / 100)
+            progress_bar = 'โ–ˆ' * filled_length + 'โ–‘' * (bar_length - filled_length)
+
+            # Calculate ETA
+            if progress_percent > 5:
+                eta = ((runtime / progress_percent) * 100) - runtime
+                eta_str = f"{eta:.0f}s"
+            else:
+                eta_str = "Calculating..."
+
+            process_status = {
+                "pid": pid,
+                "command": info["command"][:60] + "..."
if len(info["command"]) > 60 else info["command"], + "status": info["status"], + "runtime": f"{runtime:.1f}s", + "progress_percent": f"{progress_percent:.1f}%", + "progress_bar": progress_bar, + "eta": eta_str, + "bytes_processed": info.get("bytes_processed", 0), + "last_output": info.get("last_output", "")[:100] + } + dashboard["processes"].append(process_status) + + return jsonify(dashboard) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error getting process dashboard: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +# ============================================================================ +# SECURITY TOOLS API ENDPOINTS +# ============================================================================ + +@app.route("/api/tools/nmap", methods=["POST"]) +def nmap(): + """Execute nmap scan with enhanced logging and caching""" + try: + params = request.json + target = params.get("target", "") + scan_type = params.get("scan_type", "-sCV") + ports = params.get("ports", "") + additional_args = params.get("additional_args", "-T4 -Pn") + + if not target: + logger.warning("๐ŸŽฏ Nmap called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"nmap {scan_type}" + + if ports: + command += f" -p {ports}" + + if additional_args: + command += f" {additional_args}" + + command += f" {target}" + + logger.info(f"๐Ÿ” Starting Nmap scan: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Nmap scan completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in nmap endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/gobuster", methods=["POST"]) +def gobuster(): + """Execute gobuster with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + mode = params.get("mode", "dir") + wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŒ Gobuster called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + # Validate mode + if mode not in ["dir", "dns", "fuzz", "vhost"]: + logger.warning(f"โŒ Invalid gobuster mode: {mode}") + return jsonify({ + "error": f"Invalid mode: {mode}. 
Must be one of: dir, dns, fuzz, vhost" + }), 400 + + command = f"gobuster {mode} -u {url} -w {wordlist}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ“ Starting Gobuster {mode} scan: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Gobuster scan completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in gobuster endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/nuclei", methods=["POST"]) +def nuclei(): + """Execute Nuclei vulnerability scanner with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + severity = params.get("severity", "") + tags = params.get("tags", "") + template = params.get("template", "") + additional_args = params.get("additional_args", "") + + if not target: + logger.warning("๐ŸŽฏ Nuclei called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"nuclei -u {target}" + + if severity: + command += f" -severity {severity}" + + if tags: + command += f" -tags {tags}" + + if template: + command += f" -t {template}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ”ฌ Starting Nuclei vulnerability scan: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Nuclei scan completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in nuclei endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# ============================================================================ +# CLOUD SECURITY TOOLS +# ============================================================================ + +@app.route("/api/tools/prowler", methods=["POST"]) +def prowler(): + """Execute Prowler for AWS security assessment""" + try: + params = request.json + provider = params.get("provider", "aws") + profile = params.get("profile", "default") + region = params.get("region", "") + checks = params.get("checks", "") + output_dir = params.get("output_dir", "/tmp/prowler_output") + output_format = params.get("output_format", "json") + additional_args = params.get("additional_args", "") + + # Ensure output directory exists + Path(output_dir).mkdir(parents=True, exist_ok=True) + + command = f"prowler {provider}" + + if profile: + command += f" --profile {profile}" + + if region: + command += f" --region {region}" + + if checks: + command += f" --checks {checks}" + + command += f" --output-directory {output_dir}" + command += f" --output-format {output_format}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"โ˜๏ธ Starting Prowler {provider} security assessment") + result = execute_command(command) + result["output_directory"] = output_dir + logger.info(f"๐Ÿ“Š Prowler assessment completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in prowler endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/trivy", methods=["POST"]) +def trivy(): + """Execute Trivy for container/filesystem vulnerability scanning""" + try: + params = request.json + scan_type = params.get("scan_type", "image") # image, fs, repo + target = params.get("target", "") + output_format = params.get("output_format", "json") + severity = params.get("severity", "") + output_file = params.get("output_file", "") + additional_args = params.get("additional_args", "") + + 
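+        # Target format depends on scan_type: an image reference for "image"
+        # (e.g. alpine:3.19), a local filesystem path for "fs", and a
+        # repository URL for "repo".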
if not target: + logger.warning("๐ŸŽฏ Trivy called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"trivy {scan_type} {target}" + + if output_format: + command += f" --format {output_format}" + + if severity: + command += f" --severity {severity}" + + if output_file: + command += f" --output {output_file}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Trivy {scan_type} scan: {target}") + result = execute_command(command) + if output_file: + result["output_file"] = output_file + logger.info(f"๐Ÿ“Š Trivy scan completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in trivy endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/dirb", methods=["POST"]) +def dirb(): + """Execute dirb with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŒ Dirb called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"dirb {url} {wordlist}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ“ Starting Dirb scan: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Dirb scan completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in dirb endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/nikto", methods=["POST"]) +def nikto(): + """Execute nikto with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + additional_args = params.get("additional_args", "") + + if not target: + logger.warning("๐ŸŽฏ Nikto called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"nikto -h {target}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ”ฌ Starting Nikto scan: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Nikto scan completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in nikto endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/sqlmap", methods=["POST"]) +def sqlmap(): + """Execute sqlmap with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + data = params.get("data", "") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŽฏ SQLMap called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"sqlmap -u {url} --batch" + + if data: + command += f" --data=\"{data}\"" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ’‰ Starting SQLMap scan: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š SQLMap scan completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in sqlmap endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/metasploit", methods=["POST"]) +def metasploit(): + """Execute metasploit module with enhanced logging""" + try: + params = request.json + module = params.get("module", "") + options 
= params.get("options", {}) + + if not module: + logger.warning("๐Ÿš€ Metasploit called without module parameter") + return jsonify({ + "error": "Module parameter is required" + }), 400 + + # Create an MSF resource script + resource_content = f"use {module}\n" + for key, value in options.items(): + resource_content += f"set {key} {value}\n" + resource_content += "exploit\n" + + # Save resource script to a temporary file + resource_file = "/tmp/mcp_msf_resource.rc" + with open(resource_file, "w") as f: + f.write(resource_content) + + command = f"msfconsole -q -r {resource_file}" + + logger.info(f"๐Ÿš€ Starting Metasploit module: {module}") + result = execute_command(command) + + # Clean up the temporary file + try: + os.remove(resource_file) + except Exception as e: + logger.warning(f"Error removing temporary resource file: {str(e)}") + + logger.info(f"๐Ÿ“Š Metasploit module completed: {module}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in metasploit endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/hydra", methods=["POST"]) +def hydra(): + """Execute hydra with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + service = params.get("service", "") + username = params.get("username", "") + username_file = params.get("username_file", "") + password = params.get("password", "") + password_file = params.get("password_file", "") + additional_args = params.get("additional_args", "") + + if not target or not service: + logger.warning("๐ŸŽฏ Hydra called without target or service parameter") + return jsonify({ + "error": "Target and service parameters are required" + }), 400 + + if not (username or username_file) or not (password or password_file): + logger.warning("๐Ÿ”‘ Hydra called without username/password parameters") + return jsonify({ + "error": "Username/username_file and password/password_file are required" + }), 400 + + command = f"hydra -t 4" + + if username: + command += f" -l {username}" + elif username_file: + command += f" -L {username_file}" + + if password: + command += f" -p {password}" + elif password_file: + command += f" -P {password_file}" + + if additional_args: + command += f" {additional_args}" + + command += f" {target} {service}" + + logger.info(f"๐Ÿ”‘ Starting Hydra attack: {target}:{service}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Hydra attack completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in hydra endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/john", methods=["POST"]) +def john(): + """Execute john with enhanced logging""" + try: + params = request.json + hash_file = params.get("hash_file", "") + wordlist = params.get("wordlist", "/usr/share/wordlists/rockyou.txt") + format_type = params.get("format", "") + additional_args = params.get("additional_args", "") + + if not hash_file: + logger.warning("๐Ÿ” John called without hash_file parameter") + return jsonify({ + "error": "Hash file parameter is required" + }), 400 + + command = f"john" + + if format_type: + command += f" --format={format_type}" + + if wordlist: + command += f" --wordlist={wordlist}" + + if additional_args: + command += f" {additional_args}" + + command += f" {hash_file}" + + logger.info(f"๐Ÿ” Starting John the Ripper: {hash_file}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š John the Ripper completed") + return 
jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in john endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/wpscan", methods=["POST"]) +def wpscan(): + """Execute wpscan with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŒ WPScan called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"wpscan --url {url}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting WPScan: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š WPScan completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in wpscan endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/enum4linux", methods=["POST"]) +def enum4linux(): + """Execute enum4linux with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + additional_args = params.get("additional_args", "-a") + + if not target: + logger.warning("๐ŸŽฏ Enum4linux called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"enum4linux {additional_args} {target}" + + logger.info(f"๐Ÿ” Starting Enum4linux: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Enum4linux completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in enum4linux endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/ffuf", methods=["POST"]) +def ffuf(): + """Execute FFuf web fuzzer with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") + mode = params.get("mode", "directory") + match_codes = params.get("match_codes", "200,204,301,302,307,401,403") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŒ FFuf called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"ffuf" + + if mode == "directory": + command += f" -u {url}/FUZZ -w {wordlist}" + elif mode == "vhost": + command += f" -u {url} -H 'Host: FUZZ' -w {wordlist}" + elif mode == "parameter": + command += f" -u {url}?FUZZ=value -w {wordlist}" + else: + command += f" -u {url} -w {wordlist}" + + command += f" -mc {match_codes}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting FFuf {mode} fuzzing: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š FFuf fuzzing completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in ffuf endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/netexec", methods=["POST"]) +def netexec(): + """Execute NetExec (formerly CrackMapExec) with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + protocol = params.get("protocol", "smb") + username = params.get("username", "") + password = params.get("password", "") + hash_value = params.get("hash", "") + module = params.get("module", "") + additional_args = params.get("additional_args", "") + + if not target: + logger.warning("๐ŸŽฏ NetExec called without target 
parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"nxc {protocol} {target}" + + if username: + command += f" -u {username}" + + if password: + command += f" -p {password}" + + if hash_value: + command += f" -H {hash_value}" + + if module: + command += f" -M {module}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting NetExec {protocol} scan: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š NetExec scan completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in netexec endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/amass", methods=["POST"]) +def amass(): + """Execute Amass for subdomain enumeration with enhanced logging""" + try: + params = request.json + domain = params.get("domain", "") + mode = params.get("mode", "enum") + additional_args = params.get("additional_args", "") + + if not domain: + logger.warning("๐ŸŒ Amass called without domain parameter") + return jsonify({ + "error": "Domain parameter is required" + }), 400 + + command = f"amass {mode}" + + if mode == "enum": + command += f" -d {domain}" + else: + command += f" -d {domain}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Amass {mode}: {domain}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Amass completed for {domain}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in amass endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/hashcat", methods=["POST"]) +def hashcat(): + """Execute Hashcat for password cracking with enhanced logging""" + try: + params = request.json + hash_file = params.get("hash_file", "") + hash_type = params.get("hash_type", "") + attack_mode = params.get("attack_mode", "0") + wordlist = params.get("wordlist", "/usr/share/wordlists/rockyou.txt") + mask = params.get("mask", "") + additional_args = params.get("additional_args", "") + + if not hash_file: + logger.warning("๐Ÿ” Hashcat called without hash_file parameter") + return jsonify({ + "error": "Hash file parameter is required" + }), 400 + + if not hash_type: + logger.warning("๐Ÿ” Hashcat called without hash_type parameter") + return jsonify({ + "error": "Hash type parameter is required" + }), 400 + + command = f"hashcat -m {hash_type} -a {attack_mode} {hash_file}" + + if attack_mode == "0" and wordlist: + command += f" {wordlist}" + elif attack_mode == "3" and mask: + command += f" {mask}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Hashcat attack: mode {attack_mode}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Hashcat attack completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in hashcat endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/subfinder", methods=["POST"]) +def subfinder(): + """Execute Subfinder for passive subdomain enumeration with enhanced logging""" + try: + params = request.json + domain = params.get("domain", "") + silent = params.get("silent", True) + all_sources = params.get("all_sources", False) + additional_args = params.get("additional_args", "") + + if not domain: + logger.warning("๐ŸŒ Subfinder called without domain parameter") + return jsonify({ + "error": "Domain parameter is required" + }), 400 + + 
command = f"subfinder -d {domain}" + + if silent: + command += " -silent" + + if all_sources: + command += " -all" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Subfinder: {domain}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Subfinder completed for {domain}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in subfinder endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/smbmap", methods=["POST"]) +def smbmap(): + """Execute SMBMap for SMB share enumeration with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + username = params.get("username", "") + password = params.get("password", "") + domain = params.get("domain", "") + additional_args = params.get("additional_args", "") + + if not target: + logger.warning("๐ŸŽฏ SMBMap called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"smbmap -H {target}" + + if username: + command += f" -u {username}" + + if password: + command += f" -p {password}" + + if domain: + command += f" -d {domain}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting SMBMap: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š SMBMap completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in smbmap endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/volatility", methods=["POST"]) +def volatility(): + """Execute Volatility for memory forensics with enhanced logging""" + try: + params = request.json + memory_file = params.get("memory_file", "") + plugin = params.get("plugin", "") + profile = params.get("profile", "") + additional_args = params.get("additional_args", "") + + if not memory_file: + logger.warning("๐Ÿง  Volatility called without memory_file parameter") + return jsonify({ + "error": "Memory file parameter is required" + }), 400 + + if not plugin: + logger.warning("๐Ÿง  Volatility called without plugin parameter") + return jsonify({ + "error": "Plugin parameter is required" + }), 400 + + command = f"volatility -f {memory_file}" + + if profile: + command += f" --profile={profile}" + + command += f" {plugin}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿง  Starting Volatility analysis: {plugin}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Volatility analysis completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in volatility endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/msfvenom", methods=["POST"]) +def msfvenom(): + """Execute MSFVenom to generate payloads with enhanced logging""" + try: + params = request.json + payload = params.get("payload", "") + format_type = params.get("format", "") + output_file = params.get("output_file", "") + encoder = params.get("encoder", "") + iterations = params.get("iterations", "") + additional_args = params.get("additional_args", "") + + if not payload: + logger.warning("๐Ÿš€ MSFVenom called without payload parameter") + return jsonify({ + "error": "Payload parameter is required" + }), 400 + + command = f"msfvenom -p {payload}" + + if format_type: + command += f" -f {format_type}" + + if output_file: + command += f" -o {output_file}" + + if encoder: + command += f" -e 
{encoder}" + + if iterations: + command += f" -i {iterations}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿš€ Starting MSFVenom payload generation: {payload}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š MSFVenom payload generated") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in msfvenom endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# ============================================================================ +# BINARY ANALYSIS & REVERSE ENGINEERING TOOLS +# ============================================================================ + +@app.route("/api/tools/gdb", methods=["POST"]) +def gdb(): + """Execute GDB for binary analysis and debugging with enhanced logging""" + try: + params = request.json + binary = params.get("binary", "") + commands = params.get("commands", "") + script_file = params.get("script_file", "") + additional_args = params.get("additional_args", "") + + if not binary: + logger.warning("๐Ÿ”ง GDB called without binary parameter") + return jsonify({ + "error": "Binary parameter is required" + }), 400 + + command = f"gdb {binary}" + + if script_file: + command += f" -x {script_file}" + + if commands: + temp_script = "/tmp/gdb_commands.txt" + with open(temp_script, "w") as f: + f.write(commands) + command += f" -x {temp_script}" + + if additional_args: + command += f" {additional_args}" + + command += " -batch" + + logger.info(f"๐Ÿ”ง Starting GDB analysis: {binary}") + result = execute_command(command) + + if commands and os.path.exists("/tmp/gdb_commands.txt"): + try: + os.remove("/tmp/gdb_commands.txt") + except: + pass + + logger.info(f"๐Ÿ“Š GDB analysis completed for {binary}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in gdb endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/radare2", methods=["POST"]) +def radare2(): + """Execute Radare2 for binary analysis and reverse engineering with enhanced logging""" + try: + params = request.json + binary = params.get("binary", "") + commands = params.get("commands", "") + additional_args = params.get("additional_args", "") + + if not binary: + logger.warning("๐Ÿ”ง Radare2 called without binary parameter") + return jsonify({ + "error": "Binary parameter is required" + }), 400 + + if commands: + temp_script = "/tmp/r2_commands.txt" + with open(temp_script, "w") as f: + f.write(commands) + command = f"r2 -i {temp_script} -q {binary}" + else: + command = f"r2 -q {binary}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ”ง Starting Radare2 analysis: {binary}") + result = execute_command(command) + + if commands and os.path.exists("/tmp/r2_commands.txt"): + try: + os.remove("/tmp/r2_commands.txt") + except: + pass + + logger.info(f"๐Ÿ“Š Radare2 analysis completed for {binary}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in radare2 endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/binwalk", methods=["POST"]) +def binwalk(): + """Execute Binwalk for firmware and file analysis with enhanced logging""" + try: + params = request.json + file_path = params.get("file_path", "") + extract = params.get("extract", False) + additional_args = params.get("additional_args", "") + + if not file_path: + logger.warning("๐Ÿ”ง Binwalk called without file_path parameter") + return jsonify({ + "error": 
"File path parameter is required" + }), 400 + + command = f"binwalk" + + if extract: + command += " -e" + + if additional_args: + command += f" {additional_args}" + + command += f" {file_path}" + + logger.info(f"๐Ÿ”ง Starting Binwalk analysis: {file_path}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Binwalk analysis completed for {file_path}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in binwalk endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/ropgadget", methods=["POST"]) +def ropgadget(): + """Search for ROP gadgets in a binary using ROPgadget with enhanced logging""" + try: + params = request.json + binary = params.get("binary", "") + gadget_type = params.get("gadget_type", "") + additional_args = params.get("additional_args", "") + + if not binary: + logger.warning("๐Ÿ”ง ROPgadget called without binary parameter") + return jsonify({ + "error": "Binary parameter is required" + }), 400 + + command = f"ROPgadget --binary {binary}" + + if gadget_type: + command += f" --only '{gadget_type}'" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ”ง Starting ROPgadget search: {binary}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š ROPgadget search completed for {binary}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in ropgadget endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/checksec", methods=["POST"]) +def checksec(): + """Check security features of a binary with enhanced logging""" + try: + params = request.json + binary = params.get("binary", "") + + if not binary: + logger.warning("๐Ÿ”ง Checksec called without binary parameter") + return jsonify({ + "error": "Binary parameter is required" + }), 400 + + command = f"checksec --file={binary}" + + logger.info(f"๐Ÿ”ง Starting Checksec analysis: {binary}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Checksec analysis completed for {binary}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in checksec endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/xxd", methods=["POST"]) +def xxd(): + """Create a hex dump of a file using xxd with enhanced logging""" + try: + params = request.json + file_path = params.get("file_path", "") + offset = params.get("offset", "0") + length = params.get("length", "") + additional_args = params.get("additional_args", "") + + if not file_path: + logger.warning("๐Ÿ”ง XXD called without file_path parameter") + return jsonify({ + "error": "File path parameter is required" + }), 400 + + command = f"xxd -s {offset}" + + if length: + command += f" -l {length}" + + if additional_args: + command += f" {additional_args}" + + command += f" {file_path}" + + logger.info(f"๐Ÿ”ง Starting XXD hex dump: {file_path}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š XXD hex dump completed for {file_path}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in xxd endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/strings", methods=["POST"]) +def strings(): + """Extract strings from a binary file with enhanced logging""" + try: + params = request.json + file_path = params.get("file_path", "") + min_len = params.get("min_len", 4) + additional_args = params.get("additional_args", "") + + if 
not file_path: + logger.warning("๐Ÿ”ง Strings called without file_path parameter") + return jsonify({ + "error": "File path parameter is required" + }), 400 + + command = f"strings -n {min_len}" + + if additional_args: + command += f" {additional_args}" + + command += f" {file_path}" + + logger.info(f"๐Ÿ”ง Starting Strings extraction: {file_path}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Strings extraction completed for {file_path}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in strings endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/objdump", methods=["POST"]) +def objdump(): + """Analyze a binary using objdump with enhanced logging""" + try: + params = request.json + binary = params.get("binary", "") + disassemble = params.get("disassemble", True) + additional_args = params.get("additional_args", "") + + if not binary: + logger.warning("๐Ÿ”ง Objdump called without binary parameter") + return jsonify({ + "error": "Binary parameter is required" + }), 400 + + command = f"objdump" + + if disassemble: + command += " -d" + else: + command += " -x" + + if additional_args: + command += f" {additional_args}" + + command += f" {binary}" + + logger.info(f"๐Ÿ”ง Starting Objdump analysis: {binary}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Objdump analysis completed for {binary}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in objdump endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# ============================================================================ +# ADDITIONAL WEB SECURITY TOOLS +# ============================================================================ + +@app.route("/api/tools/feroxbuster", methods=["POST"]) +def feroxbuster(): + """Execute Feroxbuster for recursive content discovery with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") + threads = params.get("threads", 10) + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŒ Feroxbuster called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"feroxbuster -u {url} -w {wordlist} -t {threads}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Feroxbuster scan: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Feroxbuster scan completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in feroxbuster endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/dotdotpwn", methods=["POST"]) +def dotdotpwn(): + """Execute DotDotPwn for directory traversal testing with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + module = params.get("module", "http") + additional_args = params.get("additional_args", "") + + if not target: + logger.warning("๐ŸŽฏ DotDotPwn called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"dotdotpwn -m {module} -h {target}" + + if additional_args: + command += f" {additional_args}" + + command += " -b" + + logger.info(f"๐Ÿ” Starting DotDotPwn scan: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š DotDotPwn scan completed for {target}") + 
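# For a typical call (default module "http", a target such as "10.10.10.5") the command
+        # executed above is "dotdotpwn -m http -h 10.10.10.5 -b"; the raw result dict from
+        # execute_command() is returned to the caller as JSON. +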
return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in dotdotpwn endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/xsser", methods=["POST"]) +def xsser(): + """Execute XSSer for XSS vulnerability testing with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + params_str = params.get("params", "") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŒ XSSer called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"xsser --url '{url}'" + + if params_str: + command += f" --param='{params_str}'" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting XSSer scan: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š XSSer scan completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in xsser endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/wfuzz", methods=["POST"]) +def wfuzz(): + """Execute Wfuzz for web application fuzzing with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŒ Wfuzz called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"wfuzz -w {wordlist} '{url}'" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Wfuzz scan: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Wfuzz scan completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in wfuzz endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# ============================================================================ +# ADVANCED WEB SECURITY TOOLS CONTINUED +# ============================================================================ + +@app.route("/api/tools/burpsuite", methods=["POST"]) +def burpsuite(): + """Execute Burp Suite with enhanced logging""" + try: + params = request.json + project_file = params.get("project_file", "") + config_file = params.get("config_file", "") + target = params.get("target", "") + additional_args = params.get("additional_args", "") + headless = params.get("headless", False) + scan_type = params.get("scan_type", "") + scan_config = params.get("scan_config", "") + output_file = params.get("output_file", "") + + command = "burpsuite" + + if headless: + command += " --headless" + + if project_file: + command += f" --project-file=\"{project_file}\"" + + if config_file: + command += f" --config-file=\"{config_file}\"" + + if target: + command += f" --target=\"{target}\"" + + if headless and scan_type: + command += f" --{scan_type}" + + if scan_config: + command += f" --scan-config=\"{scan_config}\"" + + if output_file: + command += f" --output-file=\"{output_file}\"" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Burp Suite scan") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Burp Suite scan completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in burpsuite endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + 
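+# Example usage (illustrative only): the Burp Suite endpoint above can be driven by any HTTP
+# client. The JSON keys mirror the params.get() lookups in the handler; the host and port
+# below are assumptions -- substitute the address this Flask app is actually bound to.
+#
+#   import requests
+#   resp = requests.post(
+#       "http://127.0.0.1:5000/api/tools/burpsuite",
+#       json={"headless": True, "target": "https://example.com"},
+#   )
+#   print(resp.json())
+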
+@app.route("/api/tools/zap", methods=["POST"]) +def zap(): + """Execute OWASP ZAP with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + scan_type = params.get("scan_type", "baseline") + api_key = params.get("api_key", "") + daemon = params.get("daemon", False) + port = params.get("port", "8090") + host = params.get("host", "0.0.0.0") + format_type = params.get("format", "xml") + output_file = params.get("output_file", "") + additional_args = params.get("additional_args", "") + + if not target and scan_type != "daemon": + logger.warning("๐ŸŽฏ ZAP called without target parameter") + return jsonify({ + "error": "Target parameter is required for scans" + }), 400 + + if daemon: + command = f"zaproxy -daemon -host {host} -port {port}" + if api_key: + command += f" -config api.key={api_key}" + else: + command = f"zaproxy -cmd -quickurl {target}" + + if format_type: + command += f" -quickout {format_type}" + + if output_file: + command += f" -quickprogress -dir \"{output_file}\"" + + if api_key: + command += f" -config api.key={api_key}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting ZAP scan: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š ZAP scan completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in zap endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/arjun", methods=["POST"]) +def arjun(): + """Execute Arjun for parameter discovery with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + method = params.get("method", "GET") + data = params.get("data", "") + headers = params.get("headers", "") + timeout = params.get("timeout", "") + output_file = params.get("output_file", "") + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐ŸŽฏ Arjun called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"arjun -u \"{url}\" -m {method}" + + if data and method.upper() == "POST": + command += f" -d \"{data}\"" + + if headers: + command += f" -H \"{headers}\"" + + if timeout: + command += f" -t {timeout}" + + if output_file: + command += f" -o \"{output_file}\"" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Arjun parameter discovery: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Arjun completed for {url}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in arjun endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/wafw00f", methods=["POST"]) +def wafw00f(): + """Execute wafw00f to identify and fingerprint WAF products with enhanced logging""" + try: + params = request.json + target = params.get("target", "") + additional_args = params.get("additional_args", "") + + if not target: + logger.warning("๐Ÿ›ก๏ธ Wafw00f called without target parameter") + return jsonify({ + "error": "Target parameter is required" + }), 400 + + command = f"wafw00f {target}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ›ก๏ธ Starting Wafw00f WAF detection: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Wafw00f completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in wafw00f endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: 
{str(e)}" + }), 500 + +@app.route("/api/tools/fierce", methods=["POST"]) +def fierce(): + """Execute fierce for DNS reconnaissance with enhanced logging""" + try: + params = request.json + domain = params.get("domain", "") + dns_server = params.get("dns_server", "") + additional_args = params.get("additional_args", "") + + if not domain: + logger.warning("๐ŸŒ Fierce called without domain parameter") + return jsonify({ + "error": "Domain parameter is required" + }), 400 + + command = f"fierce --domain {domain}" + + if dns_server: + command += f" --dns-servers {dns_server}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting Fierce DNS recon: {domain}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Fierce completed for {domain}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in fierce endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/dnsenum", methods=["POST"]) +def dnsenum(): + """Execute dnsenum for DNS enumeration with enhanced logging""" + try: + params = request.json + domain = params.get("domain", "") + dns_server = params.get("dns_server", "") + wordlist = params.get("wordlist", "") + additional_args = params.get("additional_args", "") + + if not domain: + logger.warning("๐ŸŒ DNSenum called without domain parameter") + return jsonify({ + "error": "Domain parameter is required" + }), 400 + + command = f"dnsenum {domain}" + + if dns_server: + command += f" --dnsserver {dns_server}" + + if wordlist: + command += f" --file {wordlist}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting DNSenum: {domain}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š DNSenum completed for {domain}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in dnsenum endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/autorecon", methods=["POST"]) +def autorecon(): + """Execute AutoRecon for comprehensive target enumeration with full parameter support.""" + try: + params = request.json + # Basic parameters + target = params.get("target", "") + target_file = params.get("target_file", "") + ports = params.get("ports", "") + output_dir = params.get("output_dir", "") + + # Scan control parameters + max_scans = params.get("max_scans", "") + max_port_scans = params.get("max_port_scans", "") + heartbeat = params.get("heartbeat", "") + timeout = params.get("timeout", "") + target_timeout = params.get("target_timeout", "") + + # Configuration parameters + config_file = params.get("config_file", "") + global_file = params.get("global_file", "") + plugins_dir = params.get("plugins_dir", "") + add_plugins_dir = params.get("add_plugins_dir", "") + + # Plugin selection parameters + tags = params.get("tags", "") + exclude_tags = params.get("exclude_tags", "") + port_scans = params.get("port_scans", "") + service_scans = params.get("service_scans", "") + reports = params.get("reports", "") + + # Directory structure options + single_target = params.get("single_target", False) + only_scans_dir = params.get("only_scans_dir", False) + no_port_dirs = params.get("no_port_dirs", False) + + # Nmap options + nmap = params.get("nmap", "") + nmap_append = params.get("nmap_append", "") + + # Misc options + proxychains = params.get("proxychains", False) + disable_sanity_checks = params.get("disable_sanity_checks", False) + disable_keyboard_control = 
params.get("disable_keyboard_control", False) + force_services = params.get("force_services", "") + accessible = params.get("accessible", False) + verbose = params.get("verbose", 0) # 0 for none, 1+ for increasing verbosity + + # Plugin-specific options + curl_path = params.get("curl_path", "") + dirbuster_tool = params.get("dirbuster_tool", "") + dirbuster_wordlist = params.get("dirbuster_wordlist", "") + dirbuster_threads = params.get("dirbuster_threads", "") + dirbuster_ext = params.get("dirbuster_ext", "") + onesixtyone_community_strings = params.get("onesixtyone_community_strings", "") + + # Global plugin options + global_username_wordlist = params.get("global_username_wordlist", "") + global_password_wordlist = params.get("global_password_wordlist", "") + global_domain = params.get("global_domain", "") + + # Additional arguments + additional_args = params.get("additional_args", "") + + if not target and not target_file: + logger.warning("๐ŸŽฏ AutoRecon called without target or target_file parameter") + return jsonify({ + "error": "Either target or target_file parameter is required" + }), 400 + + # Build the command + command = "autorecon" + + # Add target or target file + if target: + command += f" {target}" + + if target_file: + command += f" -t {target_file}" + + # Add basic scan options + if ports: + command += f" -p {ports}" + + if output_dir: + command += f" -o {output_dir}" + + # Add scan control parameters + if max_scans: + command += f" -m {max_scans}" + + if max_port_scans: + command += f" -mp {max_port_scans}" + + if heartbeat: + command += f" --heartbeat {heartbeat}" + + if timeout: + command += f" --timeout {timeout}" + + if target_timeout: + command += f" --target-timeout {target_timeout}" + + # Add configuration parameters + if config_file: + command += f" -c {config_file}" + + if global_file: + command += f" -g {global_file}" + + if plugins_dir: + command += f" --plugins-dir {plugins_dir}" + + if add_plugins_dir: + command += f" --add-plugins-dir {add_plugins_dir}" + + # Add plugin selection parameters + if tags: + command += f" --tags {tags}" + + if exclude_tags: + command += f" --exclude-tags {exclude_tags}" + + if port_scans: + command += f" --port-scans {port_scans}" + + if service_scans: + command += f" --service-scans {service_scans}" + + if reports: + command += f" --reports {reports}" + + # Add directory structure options + if single_target: + command += " --single-target" + + if only_scans_dir: + command += " --only-scans-dir" + + if no_port_dirs: + command += " --no-port-dirs" + + # Add nmap options + if nmap: + command += f" --nmap \"{nmap}\"" + + if nmap_append: + command += f" --nmap-append \"{nmap_append}\"" + + # Add misc options + if proxychains: + command += " --proxychains" + + if disable_sanity_checks: + command += " --disable-sanity-checks" + + if disable_keyboard_control: + command += " --disable-keyboard-control" + + if force_services: + command += f" --force-services {force_services}" + + if accessible: + command += " --accessible" + + # Add verbosity + for _ in range(verbose): + command += " -v" + + # Add plugin-specific options + if curl_path: + command += f" --curl.path {curl_path}" + + if dirbuster_tool: + command += f" --dirbuster.tool {dirbuster_tool}" + + if dirbuster_wordlist: + command += f" --dirbuster.wordlist {dirbuster_wordlist}" + + if dirbuster_threads: + command += f" --dirbuster.threads {dirbuster_threads}" + + if dirbuster_ext: + command += f" --dirbuster.ext {dirbuster_ext}" + + if onesixtyone_community_strings: + command += f" 
--onesixtyone.community-strings {onesixtyone_community_strings}" + + # Add global plugin options + if global_username_wordlist: + command += f" --global.username-wordlist {global_username_wordlist}" + + if global_password_wordlist: + command += f" --global.password-wordlist {global_password_wordlist}" + + if global_domain: + command += f" --global.domain {global_domain}" + + # Add any additional arguments + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting AutoRecon comprehensive enumeration: {target}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š AutoRecon comprehensive enumeration completed for {target}") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in autorecon endpoint: {str(e)}") + logger.error(traceback.format_exc()) + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# Python Environment Management Endpoints +@app.route("/api/python/install", methods=["POST"]) +def install_python_package(): + """Install a Python package in a virtual environment""" + try: + params = request.json + package = params.get("package", "") + env_name = params.get("env_name", "default") + + if not package: + return jsonify({"error": "Package name is required"}), 400 + + logger.info(f"๐Ÿ“ฆ Installing Python package: {package} in env {env_name}") + success = env_manager.install_package(env_name, package) + + if success: + return jsonify({ + "success": True, + "message": f"Package {package} installed successfully", + "env_name": env_name + }) + else: + return jsonify({ + "success": False, + "error": f"Failed to install package {package}" + }), 500 + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error installing Python package: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +@app.route("/api/python/execute", methods=["POST"]) +def execute_python_script(): + """Execute a Python script in a virtual environment""" + try: + params = request.json + script = params.get("script", "") + env_name = params.get("env_name", "default") + filename = params.get("filename", f"script_{int(time.time())}.py") + + if not script: + return jsonify({"error": "Script content is required"}), 400 + + # Create script file + script_result = file_manager.create_file(filename, script) + if not script_result["success"]: + return jsonify(script_result), 500 + + # Get Python path for environment + python_path = env_manager.get_python_path(env_name) + script_path = script_result["path"] + + # Execute script + command = f"{python_path} {script_path}" + logger.info(f"๐Ÿ Executing Python script in env {env_name}: {filename}") + result = execute_command(command, use_cache=False) + + # Clean up script file + file_manager.delete_file(filename) + + result["env_name"] = env_name + result["script_filename"] = filename + logger.info(f"๐Ÿ“Š Python script execution completed") + return jsonify(result) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error executing Python script: {str(e)}") + return jsonify({"error": f"Server error: {str(e)}"}), 500 + +# ============================================================================ +# AI-POWERED PAYLOAD GENERATION (v5.0 ENHANCEMENT) UNDER DEVELOPMENT +# ============================================================================ + +class AIPayloadGenerator: + """AI-powered payload generation system with contextual intelligence""" + + def __init__(self): + self.payload_templates = { + "xss": { + "basic": ["", "javascript:alert('XSS')", "'>"], + "advanced": [ + "", + "", + 
"';alert(String.fromCharCode(88,83,83))//';alert(String.fromCharCode(88,83,83))//", + "\">"] + + def _enhance_with_context(self, payloads: list, tech_context: str) -> list: + """Enhance payloads with contextual information""" + enhanced = [] + + for payload in payloads: + # Basic payload + enhanced.append({ + "payload": payload, + "context": "basic", + "encoding": "none", + "risk_level": self._assess_risk_level(payload) + }) + + # URL encoded version + url_encoded = payload.replace(" ", "%20").replace("<", "%3C").replace(">", "%3E") + enhanced.append({ + "payload": url_encoded, + "context": "url_encoded", + "encoding": "url", + "risk_level": self._assess_risk_level(payload) + }) + + return enhanced + + def _generate_test_cases(self, payloads: list, attack_type: str) -> list: + """Generate test cases for the payloads""" + test_cases = [] + + for i, payload_info in enumerate(payloads[:5]): # Limit to 5 test cases + test_case = { + "id": f"test_{i+1}", + "payload": payload_info["payload"], + "method": "GET" if len(payload_info["payload"]) < 100 else "POST", + "expected_behavior": self._get_expected_behavior(attack_type), + "risk_level": payload_info["risk_level"] + } + test_cases.append(test_case) + + return test_cases + + def _get_expected_behavior(self, attack_type: str) -> str: + """Get expected behavior for attack type""" + behaviors = { + "xss": "JavaScript execution or popup alert", + "sqli": "Database error or data extraction", + "lfi": "File content disclosure", + "cmd_injection": "Command execution on server", + "ssti": "Template expression evaluation", + "xxe": "XML external entity processing" + } + return behaviors.get(attack_type, "Unexpected application behavior") + + def _assess_risk_level(self, payload: str) -> str: + """Assess risk level of payload""" + high_risk_indicators = ["system", "exec", "eval", "cmd", "shell", "passwd", "etc"] + medium_risk_indicators = ["script", "alert", "union", "select"] + + payload_lower = payload.lower() + + if any(indicator in payload_lower for indicator in high_risk_indicators): + return "HIGH" + elif any(indicator in payload_lower for indicator in medium_risk_indicators): + return "MEDIUM" + else: + return "LOW" + + def _get_recommendations(self, attack_type: str) -> list: + """Get testing recommendations""" + recommendations = { + "xss": [ + "Test in different input fields and parameters", + "Try both reflected and stored XSS scenarios", + "Test with different browsers for compatibility" + ], + "sqli": [ + "Test different SQL injection techniques", + "Try both error-based and blind injection", + "Test various database-specific payloads" + ], + "lfi": [ + "Test various directory traversal depths", + "Try different encoding techniques", + "Test for log file inclusion" + ], + "cmd_injection": [ + "Test different command separators", + "Try both direct and blind injection", + "Test with various payloads for different OS" + ] + } + + return recommendations.get(attack_type, ["Test thoroughly", "Monitor responses"]) + +# Global AI payload generator +ai_payload_generator = AIPayloadGenerator() + +@app.route("/api/ai/generate_payload", methods=["POST"]) +def ai_generate_payload(): + """Generate AI-powered contextual payloads for security testing""" + try: + params = request.json + target_info = { + "attack_type": params.get("attack_type", "xss"), + "complexity": params.get("complexity", "basic"), + "technology": params.get("technology", ""), + "url": params.get("url", "") + } + + logger.info(f"๐Ÿค– Generating AI payloads for 
{target_info['attack_type']} attack") + result = ai_payload_generator.generate_contextual_payload(target_info) + + logger.info(f"โœ… Generated {result['payload_count']} contextual payloads") + + return jsonify({ + "success": True, + "ai_payload_generation": result, + "timestamp": datetime.now().isoformat() + }) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in AI payload generation: {str(e)}") + return jsonify({ + "success": False, + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/ai/test_payload", methods=["POST"]) +def ai_test_payload(): + """Test generated payload against target with AI analysis""" + try: + params = request.json + payload = params.get("payload", "") + target_url = params.get("target_url", "") + method = params.get("method", "GET") + + if not payload or not target_url: + return jsonify({ + "success": False, + "error": "Payload and target_url are required" + }), 400 + + logger.info(f"๐Ÿงช Testing AI-generated payload against {target_url}") + + # Create test command based on method and payload + if method.upper() == "GET": + encoded_payload = payload.replace(" ", "%20").replace("'", "%27") + test_command = f"curl -s '{target_url}?test={encoded_payload}'" + else: + test_command = f"curl -s -X POST -d 'test={payload}' '{target_url}'" + + # Execute test + result = execute_command(test_command, use_cache=False) + + # AI analysis of results + analysis = { + "payload_tested": payload, + "target_url": target_url, + "method": method, + "response_size": len(result.get("stdout", "")), + "success": result.get("success", False), + "potential_vulnerability": payload.lower() in result.get("stdout", "").lower(), + "recommendations": [ + "Analyze response for payload reflection", + "Check for error messages indicating vulnerability", + "Monitor application behavior changes" + ] + } + + logger.info(f"๐Ÿ” Payload test completed | Potential vuln: {analysis['potential_vulnerability']}") + + return jsonify({ + "success": True, + "test_result": result, + "ai_analysis": analysis, + "timestamp": datetime.now().isoformat() + }) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in AI payload testing: {str(e)}") + return jsonify({ + "success": False, + "error": f"Server error: {str(e)}" + }), 500 + +# ============================================================================ +# ADVANCED API TESTING TOOLS (v5.0 ENHANCEMENT) +# ============================================================================ + +@app.route("/api/tools/api_fuzzer", methods=["POST"]) +def api_fuzzer(): + """Advanced API endpoint fuzzing with intelligent parameter discovery""" + try: + params = request.json + base_url = params.get("base_url", "") + endpoints = params.get("endpoints", []) + methods = params.get("methods", ["GET", "POST", "PUT", "DELETE"]) + wordlist = params.get("wordlist", "/usr/share/wordlists/api/api-endpoints.txt") + + if not base_url: + logger.warning("๐ŸŒ API Fuzzer called without base_url parameter") + return jsonify({ + "error": "Base URL parameter is required" + }), 400 + + # Create comprehensive API fuzzing command + if endpoints: + # Test specific endpoints + results = [] + for endpoint in endpoints: + for method in methods: + test_url = f"{base_url.rstrip('/')}/{endpoint.lstrip('/')}" + command = f"curl -s -X {method} -w '%{{http_code}}|%{{size_download}}' '{test_url}'" + result = execute_command(command, use_cache=False) + results.append({ + "endpoint": endpoint, + "method": method, + "result": result + }) + + logger.info(f"๐Ÿ” API endpoint testing completed for 
{len(endpoints)} endpoints") + return jsonify({ + "success": True, + "fuzzing_type": "endpoint_testing", + "results": results + }) + else: + # Discover endpoints using wordlist + command = f"ffuf -u {base_url}/FUZZ -w {wordlist} -mc 200,201,202,204,301,302,307,401,403,405 -t 50" + + logger.info(f"๐Ÿ” Starting API endpoint discovery: {base_url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š API endpoint discovery completed") + + return jsonify({ + "success": True, + "fuzzing_type": "endpoint_discovery", + "result": result + }) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in API fuzzer: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/graphql_scanner", methods=["POST"]) +def graphql_scanner(): + """Advanced GraphQL security scanning and introspection""" + try: + params = request.json + endpoint = params.get("endpoint", "") + introspection = params.get("introspection", True) + query_depth = params.get("query_depth", 10) + mutations = params.get("test_mutations", True) + + if not endpoint: + logger.warning("๐ŸŒ GraphQL Scanner called without endpoint parameter") + return jsonify({ + "error": "GraphQL endpoint parameter is required" + }), 400 + + logger.info(f"๐Ÿ” Starting GraphQL security scan: {endpoint}") + + results = { + "endpoint": endpoint, + "tests_performed": [], + "vulnerabilities": [], + "recommendations": [] + } + + # Test 1: Introspection query + if introspection: + introspection_query = ''' + { + __schema { + types { + name + fields { + name + type { + name + } + } + } + } + } + ''' + + clean_query = introspection_query.replace('\n', ' ').replace(' ', ' ').strip() + command = f"curl -s -X POST -H 'Content-Type: application/json' -d '{{\"query\":\"{clean_query}\"}}' '{endpoint}'" + result = execute_command(command, use_cache=False) + + results["tests_performed"].append("introspection_query") + + if "data" in result.get("stdout", ""): + results["vulnerabilities"].append({ + "type": "introspection_enabled", + "severity": "MEDIUM", + "description": "GraphQL introspection is enabled" + }) + + # Test 2: Query depth analysis + deep_query = "{ " * query_depth + "field" + " }" * query_depth + command = f"curl -s -X POST -H 'Content-Type: application/json' -d '{{\"query\":\"{deep_query}\"}}' {endpoint}" + depth_result = execute_command(command, use_cache=False) + + results["tests_performed"].append("query_depth_analysis") + + if "error" not in depth_result.get("stdout", "").lower(): + results["vulnerabilities"].append({ + "type": "no_query_depth_limit", + "severity": "HIGH", + "description": f"No query depth limiting detected (tested depth: {query_depth})" + }) + + # Test 3: Batch query testing + batch_query = '[' + ','.join(['{\"query\":\"{field}\"}' for _ in range(10)]) + ']' + command = f"curl -s -X POST -H 'Content-Type: application/json' -d '{batch_query}' {endpoint}" + batch_result = execute_command(command, use_cache=False) + + results["tests_performed"].append("batch_query_testing") + + if "data" in batch_result.get("stdout", "") and batch_result.get("success"): + results["vulnerabilities"].append({ + "type": "batch_queries_allowed", + "severity": "MEDIUM", + "description": "Batch queries are allowed without rate limiting" + }) + + # Generate recommendations + if results["vulnerabilities"]: + results["recommendations"] = [ + "Disable introspection in production", + "Implement query depth limiting", + "Add rate limiting for batch queries", + "Implement query complexity analysis", + "Add authentication for 
sensitive operations" + ] + + logger.info(f"๐Ÿ“Š GraphQL scan completed | Vulnerabilities found: {len(results['vulnerabilities'])}") + + return jsonify({ + "success": True, + "graphql_scan_results": results + }) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in GraphQL scanner: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/jwt_analyzer", methods=["POST"]) +def jwt_analyzer(): + """Advanced JWT token analysis and vulnerability testing""" + try: + params = request.json + jwt_token = params.get("jwt_token", "") + target_url = params.get("target_url", "") + + if not jwt_token: + logger.warning("๐Ÿ” JWT Analyzer called without jwt_token parameter") + return jsonify({ + "error": "JWT token parameter is required" + }), 400 + + logger.info(f"๐Ÿ” Starting JWT security analysis") + + results = { + "token": jwt_token[:50] + "..." if len(jwt_token) > 50 else jwt_token, + "vulnerabilities": [], + "token_info": {}, + "attack_vectors": [] + } + + # Decode JWT header and payload (basic analysis) + try: + parts = jwt_token.split('.') + if len(parts) >= 2: + # Decode header + import base64 + import json + + # Add padding if needed + header_b64 = parts[0] + '=' * (4 - len(parts[0]) % 4) + payload_b64 = parts[1] + '=' * (4 - len(parts[1]) % 4) + + try: + header = json.loads(base64.b64decode(header_b64)) + payload = json.loads(base64.b64decode(payload_b64)) + + results["token_info"] = { + "header": header, + "payload": payload, + "algorithm": header.get("alg", "unknown") + } + + # Check for vulnerabilities + algorithm = header.get("alg", "").lower() + + if algorithm == "none": + results["vulnerabilities"].append({ + "type": "none_algorithm", + "severity": "CRITICAL", + "description": "JWT uses 'none' algorithm - no signature verification" + }) + + if algorithm in ["hs256", "hs384", "hs512"]: + results["attack_vectors"].append("hmac_key_confusion") + results["vulnerabilities"].append({ + "type": "hmac_algorithm", + "severity": "MEDIUM", + "description": "HMAC algorithm detected - vulnerable to key confusion attacks" + }) + + # Check token expiration + exp = payload.get("exp") + if not exp: + results["vulnerabilities"].append({ + "type": "no_expiration", + "severity": "HIGH", + "description": "JWT token has no expiration time" + }) + + except Exception as decode_error: + results["vulnerabilities"].append({ + "type": "malformed_token", + "severity": "HIGH", + "description": f"Token decoding failed: {str(decode_error)}" + }) + + except Exception as e: + results["vulnerabilities"].append({ + "type": "invalid_format", + "severity": "HIGH", + "description": "Invalid JWT token format" + }) + + # Test token manipulation if target URL provided + if target_url: + # Test none algorithm attack + none_token_parts = jwt_token.split('.') + if len(none_token_parts) >= 2: + # Create none algorithm token + none_header = base64.b64encode('{"alg":"none","typ":"JWT"}'.encode()).decode().rstrip('=') + none_token = f"{none_header}.{none_token_parts[1]}." 
+ + command = f"curl -s -H 'Authorization: Bearer {none_token}' '{target_url}'" + none_result = execute_command(command, use_cache=False) + + if "200" in none_result.get("stdout", "") or "success" in none_result.get("stdout", "").lower(): + results["vulnerabilities"].append({ + "type": "none_algorithm_accepted", + "severity": "CRITICAL", + "description": "Server accepts tokens with 'none' algorithm" + }) + + logger.info(f"๐Ÿ“Š JWT analysis completed | Vulnerabilities found: {len(results['vulnerabilities'])}") + + return jsonify({ + "success": True, + "jwt_analysis_results": results + }) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in JWT analyzer: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/api_schema_analyzer", methods=["POST"]) +def api_schema_analyzer(): + """Analyze API schemas and identify potential security issues""" + try: + params = request.json + schema_url = params.get("schema_url", "") + schema_type = params.get("schema_type", "openapi") # openapi, swagger, graphql + + if not schema_url: + logger.warning("๐Ÿ“‹ API Schema Analyzer called without schema_url parameter") + return jsonify({ + "error": "Schema URL parameter is required" + }), 400 + + logger.info(f"๐Ÿ” Starting API schema analysis: {schema_url}") + + # Fetch schema + command = f"curl -s '{schema_url}'" + result = execute_command(command, use_cache=True) + + if not result.get("success"): + return jsonify({ + "error": "Failed to fetch API schema" + }), 400 + + schema_content = result.get("stdout", "") + + analysis_results = { + "schema_url": schema_url, + "schema_type": schema_type, + "endpoints_found": [], + "security_issues": [], + "recommendations": [] + } + + # Parse schema based on type + try: + import json + schema_data = json.loads(schema_content) + + if schema_type.lower() in ["openapi", "swagger"]: + # OpenAPI/Swagger analysis + paths = schema_data.get("paths", {}) + + for path, methods in paths.items(): + for method, details in methods.items(): + if isinstance(details, dict): + endpoint_info = { + "path": path, + "method": method.upper(), + "summary": details.get("summary", ""), + "parameters": details.get("parameters", []), + "security": details.get("security", []) + } + analysis_results["endpoints_found"].append(endpoint_info) + + # Check for security issues + if not endpoint_info["security"]: + analysis_results["security_issues"].append({ + "endpoint": f"{method.upper()} {path}", + "issue": "no_authentication", + "severity": "MEDIUM", + "description": "Endpoint has no authentication requirements" + }) + + # Check for sensitive data in parameters + for param in endpoint_info["parameters"]: + param_name = param.get("name", "").lower() + if any(sensitive in param_name for sensitive in ["password", "token", "key", "secret"]): + analysis_results["security_issues"].append({ + "endpoint": f"{method.upper()} {path}", + "issue": "sensitive_parameter", + "severity": "HIGH", + "description": f"Sensitive parameter detected: {param_name}" + }) + + # Generate recommendations + if analysis_results["security_issues"]: + analysis_results["recommendations"] = [ + "Implement authentication for all endpoints", + "Use HTTPS for all API communications", + "Validate and sanitize all input parameters", + "Implement rate limiting", + "Add proper error handling", + "Use secure headers (CORS, CSP, etc.)" + ] + + except json.JSONDecodeError: + analysis_results["security_issues"].append({ + "endpoint": "schema", + "issue": "invalid_json", + "severity": "HIGH", + 
"description": "Schema is not valid JSON" + }) + + logger.info(f"๐Ÿ“Š Schema analysis completed | Issues found: {len(analysis_results['security_issues'])}") + + return jsonify({ + "success": True, + "schema_analysis_results": analysis_results + }) + + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in API schema analyzer: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# ============================================================================ +# ADVANCED CTF TOOLS (v5.0 ENHANCEMENT) +# ============================================================================ + +@app.route("/api/tools/volatility3", methods=["POST"]) +def volatility3(): + """Execute Volatility3 for advanced memory forensics with enhanced logging""" + try: + params = request.json + memory_file = params.get("memory_file", "") + plugin = params.get("plugin", "") + output_file = params.get("output_file", "") + additional_args = params.get("additional_args", "") + + if not memory_file: + logger.warning("๐Ÿง  Volatility3 called without memory_file parameter") + return jsonify({ + "error": "Memory file parameter is required" + }), 400 + + if not plugin: + logger.warning("๐Ÿง  Volatility3 called without plugin parameter") + return jsonify({ + "error": "Plugin parameter is required" + }), 400 + + command = f"vol.py -f {memory_file} {plugin}" + + if output_file: + command += f" -o {output_file}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿง  Starting Volatility3 analysis: {plugin}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Volatility3 analysis completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in volatility3 endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/foremost", methods=["POST"]) +def foremost(): + """Execute Foremost for file carving with enhanced logging""" + try: + params = request.json + input_file = params.get("input_file", "") + output_dir = params.get("output_dir", "/tmp/foremost_output") + file_types = params.get("file_types", "") + additional_args = params.get("additional_args", "") + + if not input_file: + logger.warning("๐Ÿ“ Foremost called without input_file parameter") + return jsonify({ + "error": "Input file parameter is required" + }), 400 + + # Ensure output directory exists + Path(output_dir).mkdir(parents=True, exist_ok=True) + + command = f"foremost -o {output_dir}" + + if file_types: + command += f" -t {file_types}" + + if additional_args: + command += f" {additional_args}" + + command += f" {input_file}" + + logger.info(f"๐Ÿ“ Starting Foremost file carving: {input_file}") + result = execute_command(command) + result["output_directory"] = output_dir + logger.info(f"๐Ÿ“Š Foremost carving completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in foremost endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/steghide", methods=["POST"]) +def steghide(): + """Execute Steghide for steganography analysis with enhanced logging""" + try: + params = request.json + action = params.get("action", "extract") # extract, embed, info + cover_file = params.get("cover_file", "") + embed_file = params.get("embed_file", "") + passphrase = params.get("passphrase", "") + output_file = params.get("output_file", "") + additional_args = params.get("additional_args", "") + + if not cover_file: + logger.warning("๐Ÿ–ผ๏ธ Steghide called without 
cover_file parameter") + return jsonify({ + "error": "Cover file parameter is required" + }), 400 + + if action == "extract": + command = f"steghide extract -sf {cover_file}" + if output_file: + command += f" -xf {output_file}" + elif action == "embed": + if not embed_file: + return jsonify({"error": "Embed file required for embed action"}), 400 + command = f"steghide embed -cf {cover_file} -ef {embed_file}" + elif action == "info": + command = f"steghide info {cover_file}" + else: + return jsonify({"error": "Invalid action. Use: extract, embed, info"}), 400 + + if passphrase: + command += f" -p {passphrase}" + else: + command += " -p ''" # Empty passphrase + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ–ผ๏ธ Starting Steghide {action}: {cover_file}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Steghide {action} completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in steghide endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/exiftool", methods=["POST"]) +def exiftool(): + """Execute ExifTool for metadata extraction with enhanced logging""" + try: + params = request.json + file_path = params.get("file_path", "") + output_format = params.get("output_format", "") # json, xml, csv + tags = params.get("tags", "") + additional_args = params.get("additional_args", "") + + if not file_path: + logger.warning("๐Ÿ“ท ExifTool called without file_path parameter") + return jsonify({ + "error": "File path parameter is required" + }), 400 + + command = f"exiftool" + + if output_format: + command += f" -{output_format}" + + if tags: + command += f" -{tags}" + + if additional_args: + command += f" {additional_args}" + + command += f" {file_path}" + + logger.info(f"๐Ÿ“ท Starting ExifTool analysis: {file_path}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š ExifTool analysis completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in exiftool endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/hashpump", methods=["POST"]) +def hashpump(): + """Execute HashPump for hash length extension attacks with enhanced logging""" + try: + params = request.json + signature = params.get("signature", "") + data = params.get("data", "") + key_length = params.get("key_length", "") + append_data = params.get("append_data", "") + additional_args = params.get("additional_args", "") + + if not all([signature, data, key_length, append_data]): + logger.warning("๐Ÿ” HashPump called without required parameters") + return jsonify({ + "error": "Signature, data, key_length, and append_data parameters are required" + }), 400 + + command = f"hashpump -s {signature} -d '{data}' -k {key_length} -a '{append_data}'" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting HashPump attack") + result = execute_command(command) + logger.info(f"๐Ÿ“Š HashPump attack completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in hashpump endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +# ============================================================================ +# BUG BOUNTY RECONNAISSANCE TOOLS (v5.0 ENHANCEMENT) +# ============================================================================ + +@app.route("/api/tools/hakrawler", methods=["POST"]) +def hakrawler(): + """Execute Hakrawler for web 
endpoint discovery with enhanced logging""" + try: + params = request.json + url = params.get("url", "") + depth = params.get("depth", 2) + forms = params.get("forms", True) + robots = params.get("robots", True) + sitemap = params.get("sitemap", True) + wayback = params.get("wayback", False) + additional_args = params.get("additional_args", "") + + if not url: + logger.warning("๐Ÿ•ท๏ธ Hakrawler called without URL parameter") + return jsonify({ + "error": "URL parameter is required" + }), 400 + + command = f"hakrawler -url {url} -depth {depth}" + + if forms: + command += " -forms" + if robots: + command += " -robots" + if sitemap: + command += " -sitemap" + if wayback: + command += " -wayback" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ•ท๏ธ Starting Hakrawler crawling: {url}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š Hakrawler crawling completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in hakrawler endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/httpx", methods=["POST"]) +def httpx(): + """Execute HTTPx for HTTP probing with enhanced logging""" + try: + params = request.json + targets = params.get("targets", "") + target_file = params.get("target_file", "") + ports = params.get("ports", "") + methods = params.get("methods", "GET") + status_code = params.get("status_code", "") + content_length = params.get("content_length", False) + output_file = params.get("output_file", "") + additional_args = params.get("additional_args", "") + + if not targets and not target_file: + logger.warning("๐ŸŒ HTTPx called without targets or target_file parameter") + return jsonify({ + "error": "Either targets or target_file parameter is required" + }), 400 + + command = "httpx" + + if targets: + command += f" -u {targets}" + if target_file: + command += f" -l {target_file}" + if ports: + command += f" -p {ports}" + if methods: + command += f" -X {methods}" + if status_code: + command += f" -mc {status_code}" + if content_length: + command += " -cl" + if output_file: + command += f" -o {output_file}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐ŸŒ Starting HTTPx probing") + result = execute_command(command) + logger.info(f"๐Ÿ“Š HTTPx probing completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in httpx endpoint: {str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +@app.route("/api/tools/paramspider", methods=["POST"]) +def paramspider(): + """Execute ParamSpider for parameter discovery with enhanced logging""" + try: + params = request.json + domain = params.get("domain", "") + exclude = params.get("exclude", "") + output_file = params.get("output_file", "") + level = params.get("level", 2) + additional_args = params.get("additional_args", "") + + if not domain: + logger.warning("๐Ÿ” ParamSpider called without domain parameter") + return jsonify({ + "error": "Domain parameter is required" + }), 400 + + command = f"paramspider -d {domain} -l {level}" + + if exclude: + command += f" -e {exclude}" + if output_file: + command += f" -o {output_file}" + + if additional_args: + command += f" {additional_args}" + + logger.info(f"๐Ÿ” Starting ParamSpider discovery: {domain}") + result = execute_command(command) + logger.info(f"๐Ÿ“Š ParamSpider discovery completed") + return jsonify(result) + except Exception as e: + logger.error(f"๐Ÿ’ฅ Error in paramspider endpoint: 
{str(e)}") + return jsonify({ + "error": f"Server error: {str(e)}" + }), 500 + +if __name__ == "__main__": + print(f"{Colors.RED}{Colors.BOLD}{BANNER}{Colors.RESET}") + + parser = argparse.ArgumentParser(description="Run the HexStrike AI API Server") + parser.add_argument("--debug", action="store_true", help="Enable debug mode") + parser.add_argument("--port", type=int, default=API_PORT, help=f"Port for the API server (default: {API_PORT})") + args = parser.parse_args() + + if args.debug: + DEBUG_MODE = True + logger.setLevel(logging.DEBUG) + + if args.port != API_PORT: + API_PORT = args.port + + logger.info(f"๐Ÿš€ Starting HexStrike AI Tools API Server on port {API_PORT}") + logger.info(f"๐Ÿ”ง Debug Mode: {DEBUG_MODE}") + logger.info(f"๐Ÿ’พ Cache Size: {CACHE_SIZE} | TTL: {CACHE_TTL}s") + logger.info(f"โฑ๏ธ Command Timeout: {COMMAND_TIMEOUT}s") + + app.run(host="0.0.0.0", port=API_PORT, debug=DEBUG_MODE) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..ebb431b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +flask>=2.3.0 +psutil>=5.9.0 +requests>=2.31.0 +fastmcp>=0.2.0